Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
In the class PersistentStreamBase, the method sizeBetweenStreamCuts:
@VisibleForTesting
CompletableFuture<Long> sizeBetweenStreamCuts(Map<Long, Long> streamCutFrom, Map<Long, Long> streamCutTo,
                                              Set<StreamSegmentRecord> segmentsInBetween, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    Map<Integer, List<StreamSegmentRecord>> shards = segmentsInBetween.stream()
            .collect(Collectors.groupingBy(x -> getShardNumber(x.segmentId())));
    return Futures.allOfWithResults(shards.entrySet().stream()
            .map(entry -> getSealedSegmentSizeMapShard(entry.getKey(), context).thenApply(shardMap -> {
                return entry.getValue().stream().collect(Collectors.toMap(x -> x, x -> {
                    if (shardMap.getSize(x.segmentId()) == null) {
                        return Long.MIN_VALUE;
                    } else {
                        return shardMap.getSize(x.segmentId());
                    }
                }));
            })).collect(Collectors.toList()))
            .thenApply(listOfMap -> {
                return listOfMap.stream().flatMap(s -> s.entrySet().stream())
                        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
            })
            .thenApply(sizes -> {
                AtomicLong sizeTill = new AtomicLong(0L);
                sizes.forEach((segment, value) -> {
                    // segments in both.. to.offset - from.offset
                    if (streamCutTo.containsKey(segment.segmentId()) && streamCutFrom.containsKey(segment.segmentId())) {
                        long sizeFrom = Math.max(streamCutTo.get(segment.segmentId()), 0);
                        long sizeTo = Math.max(streamCutFrom.get(segment.segmentId()), 0);
                        sizeTill.addAndGet(sizeFrom - sizeTo);
                    } else if (streamCutTo.containsKey(segment.segmentId())) {
                        // segments only in streamcutTo: take their offsets in streamcut
                        long sizeFrom = Math.max(streamCutTo.get(segment.segmentId()), 0);
                        sizeTill.addAndGet(sizeFrom);
                    } else if (streamCutFrom.containsKey(segment.segmentId())) {
                        // segments only in from: take their total size - offset in from
                        long sizeTo = Math.max(streamCutFrom.get(segment.segmentId()), 0);
                        sizeTill.addAndGet(Math.max(value, 0) - sizeTo);
                    } else {
                        sizeTill.addAndGet(Math.max(value, 0));
                    }
                });
                return sizeTill.get();
            });
}
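The per-segment accounting buried inside the chained futures above is the core of the method: a segment contributes a different amount depending on whether it appears in both stream cuts, only in the "to" cut, only in the "from" cut, or in neither. The following standalone sketch restates just that rule with plain maps; the class, method, and parameter names are illustrative and are not part of the Pravega API.

import java.util.Map;

final class StreamCutSizeMath {
    // Contribution of a single sealed segment to the size between two stream cuts,
    // mirroring the four branches in sizeBetweenStreamCuts above. "sealedSize" is the
    // segment's total sealed size as read from the sealed-segment-size-map shard.
    static long contribution(long segmentId, long sealedSize, Map<Long, Long> from, Map<Long, Long> to) {
        boolean inFrom = from.containsKey(segmentId);
        boolean inTo = to.containsKey(segmentId);
        if (inFrom && inTo) {
            // present in both cuts: offset in "to" minus offset in "from"
            return Math.max(to.get(segmentId), 0) - Math.max(from.get(segmentId), 0);
        } else if (inTo) {
            // only in the "to" cut: count everything up to its offset there
            return Math.max(to.get(segmentId), 0);
        } else if (inFrom) {
            // only in the "from" cut: total sealed size minus the offset in "from"
            return Math.max(sealedSize, 0) - Math.max(from.get(segmentId), 0);
        } else {
            // in neither cut: the whole sealed segment lies between the two cuts
            return Math.max(sealedSize, 0);
        }
    }
}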
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
In the class StreamMetadataStoreTest, the method scaleWithTxTest:
@Test(timeout = 30000)
public void scaleWithTxTest() throws Exception {
    final String scope = "ScopeScaleWithTx";
    final String stream = "StreamScaleWithTx";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale1SealedSegments = Collections.singletonList(1L);

    // region Txn created before scale and during scale
    // scale with transaction test
    // first txn created before-scale
    UUID txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx01 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(0, tx01.getEpoch());
    VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments,
            Arrays.asList(segment2, segment3), scaleTs, null, null, executor).join();
    EpochTransitionRecord response = versioned.getObject();
    Map<Long, Map.Entry<Double, Double>> scale1SegmentsCreated = response.getNewSegmentsWithRange();
    final int epoch = response.getActiveEpoch();
    assertEquals(0, epoch);
    assertNotNull(scale1SegmentsCreated);
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    versioned = store.startScale(scope, stream, false, versioned, state, null, executor).join();

    // second txn created after new segments are created in segment table but not yet in history table
    // assert that txn is created on old epoch
    store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx02 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(0, tx02.getEpoch());
    assertEquals(0, (int) (tx02.getId().getMostSignificantBits() >> 32));

    // third transaction created after new epoch created
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    store.sealTransaction(scope, stream, tx02.getId(), true, Optional.of(tx02.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    store.sealTransaction(scope, stream, tx01.getId(), true, Optional.of(tx01.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).join();
    store.completeScale(scope, stream, versioned, null, executor).join();
    VersionedTransactionData tx03 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(0, tx03.getEpoch());
    assertEquals(0, (int) (tx03.getId().getMostSignificantBits() >> 32));
    store.setState(scope, stream, State.ACTIVE, null, executor).join();

    // ensure that we can commit transactions on old epoch and roll over.
    EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();

    // submit another scale request without starting the scale
    List<Long> scale2SealedSegments = Collections.singletonList(0L);
    long scaleTs2 = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.0, 0.25);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.25, 0.5);
    VersionedMetadata<EpochTransitionRecord> versioned2 = store.submitScale(scope, stream, scale2SealedSegments,
            Arrays.asList(segment4, segment5), scaleTs2, null, null, executor).join();
    EpochTransitionRecord response2 = versioned2.getObject();
    assertEquals(activeEpoch.getEpoch(), response2.getActiveEpoch());
    VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
    record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
    store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
    store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(3, activeEpoch.getEpoch());
    assertEquals(1, activeEpoch.getReferenceEpoch());
    assertEquals(3, activeEpoch.getSegments().size());
    List<StreamSegmentRecord> txnDuplicate = store.getSegmentsInEpoch(scope, stream, 2, null, executor).join();
    assertEquals(2, txnDuplicate.size());
    List<StreamSegmentRecord> activeEpochDuplicate = store.getSegmentsInEpoch(scope, stream, 3, null, executor).join();
    assertEquals(3, activeEpochDuplicate.size());
    EpochRecord txnCommittedEpoch = store.getEpoch(scope, stream, 2, null, executor).join();
    assertEquals(0, txnCommittedEpoch.getReferenceEpoch());
    assertEquals(store.transactionStatus(scope, stream, tx01.getId(), null, executor).join(), TxnStatus.COMMITTED);
    assertEquals(store.transactionStatus(scope, stream, tx02.getId(), null, executor).join(), TxnStatus.COMMITTED);
    assertEquals(store.transactionStatus(scope, stream, tx03.getId(), null, executor).join(), TxnStatus.OPEN);
    store.sealTransaction(scope, stream, tx03.getId(), true, Optional.of(tx03.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    // endregion

    // region verify migrate request for manual scale
    // now start manual scale against previously submitted scale request that was on old epoch from before rolling txn.
    // verify that it gets migrated to latest duplicate epoch
    state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    versioned2 = store.submitScale(scope, stream, scale2SealedSegments, Arrays.asList(segment4, segment5), scaleTs2, null, null, executor).join();
    versioned2 = store.startScale(scope, stream, true, versioned2, state, null, executor).join();
    store.scaleCreateNewEpochs(scope, stream, versioned2, null, executor).join();
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx14 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(1, tx14.getEpoch());
    store.sealTransaction(scope, stream, tx14.getId(), true, Optional.of(tx14.getVersion()), "", Long.MIN_VALUE, null, executor).get();

    // verify that new txns can be created and are created on original epoch
    txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx15 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).get();
    assertEquals(1, tx15.getEpoch());
    store.scaleCreateNewEpochs(scope, stream, versioned2, null, executor).join();
    store.scaleSegmentsSealed(scope, stream, Collections.emptyMap(), versioned2, null, executor).join();
    store.completeScale(scope, stream, versioned2, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(4, activeEpoch.getEpoch());
    assertEquals(4, activeEpoch.getReferenceEpoch());
    store.sealTransaction(scope, stream, tx15.getId(), true, Optional.of(tx15.getVersion()), "", Long.MIN_VALUE, null, executor).get();
    record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).get();
    record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
    store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
    store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(6, activeEpoch.getEpoch());
    assertEquals(4, activeEpoch.getReferenceEpoch());
    // endregion
}
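Several assertions in this test read the epoch back out of identifiers: computeSegmentId(number, epoch) is used to predict segment ids, and the epoch of a transaction is checked with (int) (tx.getId().getMostSignificantBits() >> 32). A minimal sketch of the packing those assertions imply, assuming the epoch sits in the high 32 bits and the segment number in the low 32 bits of the long id (the helper names are illustrative):

final class SegmentIdPacking {
    // Assumed layout: | 32-bit creation epoch | 32-bit segment number |
    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
    }

    // Same shift the test applies to the transaction UUID's most significant bits.
    static int epochOf(long segmentId) {
        return (int) (segmentId >> 32);
    }

    static int segmentNumberOf(long segmentId) {
        return (int) segmentId;
    }
}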
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
In the class ZkStreamTest, the method testZkStream:
@Test(timeout = 30000)
public void testZkStream() throws Exception {
    double keyChunk = 1.0 / 5;
    final ScalingPolicy policy = ScalingPolicy.fixed(5);
    @Cleanup final StreamMetadataStore store = new ZKStreamMetadataStore(cli, executor);
    final String streamName = "test";
    store.createScope(SCOPE, null, executor).get();
    StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(policy).build();
    store.createStream(SCOPE, streamName, streamConfig, System.currentTimeMillis(), null, executor).get();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).get();
    OperationContext context = store.createStreamContext(SCOPE, streamName, 0L);

    // set minimum number of segments to 1 so that we can also test scale downs
    streamConfig = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(SCOPE, streamName, streamConfig, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(SCOPE, streamName, null, executor).join();
    store.completeUpdateConfiguration(SCOPE, streamName, configRecord, null, executor).join();

    List<StreamSegmentRecord> segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 5);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, 3L, 4L).contains(x.segmentId())));
    long start = segments.get(0).getCreationTime();
    assertEquals(store.getConfiguration(SCOPE, streamName, context, executor).get(), streamConfig);

    List<Map.Entry<Double, Double>> newRanges;
    // existing range 0 = 0 - .2, 1 = .2 - .4, 2 = .4 - .6, 3 = .6 - .8, 4 = .8 - 1.0
    // 3, 4 -> 5 = .6 - 1.0
    newRanges = Collections.singletonList(new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
    long scale1 = start + 10000;
    ArrayList<Long> sealedSegments = Lists.newArrayList(3L, 4L);
    long five = computeSegmentId(5, 1);
    VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(SCOPE, streamName, sealedSegments, newRanges, scale1, null, context, executor).get();
    VersionedMetadata<State> state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 4);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, five).contains(x.segmentId())));

    // 1 -> 6 = .2 - .3, 7 = .3 - .4
    // 2, 5 -> 8 = .4 - 1.0
    newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(keyChunk, 0.3), new AbstractMap.SimpleEntry<>(0.3, 2 * keyChunk), new AbstractMap.SimpleEntry<>(2 * keyChunk, 1.0));
    long scale2 = scale1 + 10000;
    ArrayList<Long> sealedSegments1 = Lists.newArrayList(1L, 2L, five);
    long six = computeSegmentId(6, 2);
    long seven = computeSegmentId(7, 2);
    long eight = computeSegmentId(8, 2);
    versioned = store.submitScale(SCOPE, streamName, sealedSegments1, newRanges, scale2, null, context, executor).get();
    EpochTransitionRecord response = versioned.getObject();
    state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 4);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, seven, eight).contains(x.segmentId())));

    // 7 -> 9 = .3 - .35, 10 = .35 - .6
    // 8 -> 10 = .35 - .6, 11 = .6 - 1.0
    newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(0.3, 0.35), new AbstractMap.SimpleEntry<>(0.35, 3 * keyChunk), new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
    long scale3 = scale2 + 10000;
    long nine = computeSegmentId(9, 3);
    long ten = computeSegmentId(10, 3);
    long eleven = computeSegmentId(11, 3);
    ArrayList<Long> sealedSegments2 = Lists.newArrayList(seven, eight);
    versioned = store.submitScale(SCOPE, streamName, sealedSegments2, newRanges, scale3, null, context, executor).get();
    response = versioned.getObject();
    state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments2.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 5);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, nine, ten, eleven).contains(x.segmentId())));

    Map<Long, List<Long>> successors = store.getSuccessors(SCOPE, streamName, 0L, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, 1L, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(six) && successors.get(six).containsAll(Collections.singleton(1L))
            && successors.containsKey(seven) && successors.get(seven).containsAll(Collections.singleton(1L)));
    successors = store.getSuccessors(SCOPE, streamName, 2L, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
    successors = store.getSuccessors(SCOPE, streamName, 3L, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
    successors = store.getSuccessors(SCOPE, streamName, 4L, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
    successors = store.getSuccessors(SCOPE, streamName, five, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
    successors = store.getSuccessors(SCOPE, streamName, six, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, seven, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(nine) && successors.get(nine).containsAll(Collections.singleton(seven))
            && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
    successors = store.getSuccessors(SCOPE, streamName, eight, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(eleven) && successors.get(eleven).containsAll(Collections.singleton(eight))
            && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
    successors = store.getSuccessors(SCOPE, streamName, nine, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, ten, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, eleven, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());

    // start - 1
    Map<Long, Long> historicalSegments = store.getSegmentsAtHead(SCOPE, streamName, context, executor).get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertEquals(historicalSegments.size(), 5);
    assertTrue(historicalSegments.keySet().containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));

    // start + 1
    List<Long> segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 0, context, executor).get().stream()
            .map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 5);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));

    // scale1
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 1, context, executor).get().stream()
            .map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 4);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, five)));

    // scale2
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 2, context, executor).get().stream()
            .map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 4);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, seven, eight)));

    // scale3
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 3, context, executor).get().stream()
            .map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 5);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, nine, ten, eleven)));

    assertFalse(store.isSealed(SCOPE, streamName, context, executor).get());
    assertNotEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
    store.setSealed(SCOPE, streamName, context, executor).get();
    assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
    assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());

    // seal an already sealed stream.
    store.setSealed(SCOPE, streamName, context, executor).get();
    assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
    assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());

    // seal a non-existent stream.
    AssertExtensions.assertFutureThrows("", store.setSealed(SCOPE, "nonExistentStream", null, executor),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);

    store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
    assertTrue(store.isCold(SCOPE, streamName, 0L, null, executor).get());
    Thread.sleep(1000);
    assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
    store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
    store.removeMarker(SCOPE, streamName, 0L, null, executor).get();
    assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
}
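The key-range comments in this test (0 = 0 - .2, 1 = .2 - .4, and so on) follow directly from the fixed scaling policy: with ScalingPolicy.fixed(5), the five initial segments split the routing-key space into equal chunks of keyChunk = 0.2. A small sketch of that arithmetic, with a hypothetical helper name:

import java.util.AbstractMap;
import java.util.Map;

final class InitialKeyRanges {
    // Initial key range of segment i when a stream is created with n segments: [i / n, (i + 1) / n).
    // For n = 5 this yields [0, .2), [.2, .4), [.4, .6), [.6, .8) and [.8, 1.0), matching the comments above.
    static Map.Entry<Double, Double> initialRange(int segmentNumber, int numSegments) {
        double keyChunk = 1.0 / numSegments;
        return new AbstractMap.SimpleEntry<>(segmentNumber * keyChunk, (segmentNumber + 1) * keyChunk);
    }
}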
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
In the class StreamTestBase, the method testSegmentQueriesDuringScale:
@Test(timeout = 30000L)
public void testSegmentQueriesDuringScale() {
    OperationContext context = getContext();
    // start scale and perform `getSegment`, `getActiveEpoch` and `getEpoch` during different phases of scale
    int startingSegmentNumber = new Random().nextInt(20);
    Stream stream = createStream("scope", "stream" + startingSegmentNumber, System.currentTimeMillis(), 5, startingSegmentNumber);
    StreamSegmentRecord segment = stream.getSegment(startingSegmentNumber, context).join();
    assertEquals(segment.segmentId(), startingSegmentNumber + 0L);
    assertEquals(segment.getKeyStart(), 0, 0);
    assertEquals(segment.getKeyEnd(), 1.0 / 5, 0);

    long segment5 = computeSegmentId(startingSegmentNumber + 5, 1);
    long segment6 = computeSegmentId(startingSegmentNumber + 6, 1);
    long segment7 = computeSegmentId(startingSegmentNumber + 7, 1);
    long segment8 = computeSegmentId(startingSegmentNumber + 8, 1);
    long segment9 = computeSegmentId(startingSegmentNumber + 9, 1);
    List<Long> newSegments = Lists.newArrayList(segment5, segment6, segment7, segment8, segment9);
    List<StreamSegmentRecord> originalSegments = stream.getActiveSegments(context).join();
    List<Long> segmentsToSeal = originalSegments.stream().map(StreamSegmentRecord::segmentId).collect(Collectors.toList());
    List<Map.Entry<Double, Double>> newRanges = originalSegments.stream()
            .map(x -> new AbstractMap.SimpleEntry<>(x.getKeyStart(), x.getKeyEnd())).collect(Collectors.toList());
    VersionedMetadata<EpochTransitionRecord> etr = stream.getEpochTransition(context).join();

    // submit scale
    etr = stream.submitScale(segmentsToSeal, newRanges, System.currentTimeMillis(), etr, context).join();
    VersionedMetadata<State> state = stream.getVersionedState(context)
            .thenCompose(s -> stream.updateVersionedState(s, State.SCALING, context)).join();
    etr = stream.startScale(true, etr, state, context).join();
    List<StreamSegmentRecord> newCurrentSegments = stream.getActiveSegments(context).join();
    assertEquals(originalSegments, newCurrentSegments);
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.getSegment(segment9, context),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    Map<StreamSegmentRecord, List<Long>> successorsWithPredecessors = stream.getSuccessorsWithPredecessors(0L, context).join();
    assertTrue(successorsWithPredecessors.isEmpty());

    // scale create new epochs
    stream.scaleCreateNewEpoch(etr, context).join();
    newCurrentSegments = stream.getActiveSegments(context).join();
    assertEquals(originalSegments, newCurrentSegments);
    segment = stream.getSegment(segment9, context).join();
    assertEquals(computeSegmentId(startingSegmentNumber + 9, 1), segment.segmentId());
    assertEquals(segment.getKeyStart(), 1.0 / 5 * 4, 0);
    assertEquals(segment.getKeyEnd(), 1.0, 0);
    successorsWithPredecessors = stream.getSuccessorsWithPredecessors(startingSegmentNumber + 0L, context).join();
    Set<StreamSegmentRecord> successors = successorsWithPredecessors.keySet();
    assertEquals(1, successors.size());
    StreamSegmentRecord five = successors.stream().findAny().get();
    assertEquals(computeSegmentId(startingSegmentNumber + 5, 1), five.segmentId());
    List<Long> predecessors = successorsWithPredecessors.get(five);
    assertEquals(1, predecessors.size());
    assertTrue(predecessors.contains(startingSegmentNumber + 0L));

    // scale old segments sealed
    stream.scaleOldSegmentsSealed(Collections.emptyMap(), etr, context).join();
    newCurrentSegments = stream.getActiveSegments(context).join();
    assertEquals(new HashSet<>(newSegments), newCurrentSegments.stream().map(StreamSegmentRecord::segmentId).collect(Collectors.toSet()));
    segment = stream.getSegment(segment9, context).join();
    assertEquals(computeSegmentId(startingSegmentNumber + 9, 1), segment.segmentId());
    assertEquals(segment.getKeyStart(), 1.0 / 5 * 4, 0);
    assertEquals(segment.getKeyEnd(), 1.0, 0);

    // complete scale
    stream.completeScale(etr, context).join();
    segment = stream.getSegment(segment9, context).join();
    assertEquals(computeSegmentId(startingSegmentNumber + 9, 1), segment.segmentId());
    assertEquals(segment.getKeyStart(), 1.0 / 5 * 4, 0);
    assertEquals(segment.getKeyEnd(), 1.0, 0);
}
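The successor and predecessor relationships asserted above are driven by key-range overlap: a segment created in a scale is a successor of a sealed segment exactly when their routing-key ranges intersect. A hedged sketch of that overlap test, using only the getKeyStart/getKeyEnd values that StreamSegmentRecord exposes (the helper is illustrative, not Pravega code):

final class RangeOverlap {
    // Two half-open key ranges [aStart, aEnd) and [bStart, bEnd) overlap iff each
    // starts before the other ends; this is the relation behind successors/predecessors.
    static boolean overlaps(double aStart, double aEnd, double bStart, double bEnd) {
        return aStart < bEnd && bStart < aEnd;
    }
}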
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
In the class StreamTestBase, the method createScaleAndRollStreamForMultiChunkTests:
// region multiple chunks test
private PersistentStreamBase createScaleAndRollStreamForMultiChunkTests(String name, String scope, int startingSegmentNumber, Supplier<Long> time) {
    OperationContext context = getContext();
    createScope(scope, context);
    PersistentStreamBase stream = createStream(scope, name, time.get(), 5, startingSegmentNumber, 2, 2);
    UUID txnId = createAndCommitTransaction(stream, 0, 0L);
    // scale the stream 5 times so that over all we have 6 epochs and hence 3 chunks.
    for (int i = 0; i < 5; i++) {
        StreamSegmentRecord first = stream.getActiveSegments(context).join().get(0);
        ArrayList<Long> sealedSegments = Lists.newArrayList(first.segmentId());
        List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
        newRanges.add(new AbstractMap.SimpleEntry<>(first.getKeyStart(), first.getKeyEnd()));
        Map<Long, Long> sealedSizeMap = new HashMap<>();
        sealedSizeMap.put(first.segmentId(), 100L);
        scaleStream(stream, time.get(), sealedSegments, newRanges, sealedSizeMap);
    }
    EpochRecord activeEpoch = stream.getActiveEpoch(true, context).join();
    // now roll transaction so that we have 2 more epochs added for overall 8 epochs and 4 chunks
    Map<Long, Long> map1 = stream.getEpochRecord(0, context).join().getSegmentIds().stream()
            .collect(Collectors.toMap(x -> computeSegmentId(NameUtils.getSegmentNumber(x), activeEpoch.getEpoch() + 1), x -> 100L));
    Map<Long, Long> map2 = activeEpoch.getSegmentIds().stream().collect(Collectors.toMap(x -> x, x -> 100L));
    rollTransactions(stream, time.get(), 0, activeEpoch.getEpoch(), map1, map2);
    // scale the stream 5 times so that over all we have 13 epochs and hence 7 chunks.
    for (int i = 0; i < 5; i++) {
        StreamSegmentRecord first = stream.getActiveSegments(context).join().get(0);
        ArrayList<Long> sealedSegments = Lists.newArrayList(first.segmentId());
        List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
        newRanges.add(new AbstractMap.SimpleEntry<>(first.getKeyStart(), first.getKeyEnd()));
        Map<Long, Long> sealedSizeMap = new HashMap<>();
        sealedSizeMap.put(first.segmentId(), 100L);
        scaleStream(stream, time.get(), sealedSegments, newRanges, sealedSizeMap);
    }
    return stream;
}
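The chunk counts in the comments (6 epochs -> 3 chunks, 13 epochs -> 7 chunks) are consistent with a history-time-series chunk size of 2, which the trailing "2, 2" arguments to createStream appear to configure (an assumption about this test fixture). Under that assumption, the chunk holding a given epoch is just integer division:

final class HistoryChunks {
    // Assumed mapping: epoch / chunkSize. With chunkSize = 2, epochs 0-5 span
    // chunks 0-2 (3 chunks) and epochs 0-12 span chunks 0-6 (7 chunks).
    static int chunkNumber(int epoch, int chunkSize) {
        return epoch / chunkSize;
    }
}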