Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
The class PersistentStreamBase, method computeStreamCutSpanInternal.
private ImmutableMap<StreamSegmentRecord, Integer> computeStreamCutSpanInternal(Map<Long, Long> streamCut, int epochLow,
                                                                                int epochHigh, List<EpochRecord> epochs) {
    List<Long> toFind = new ArrayList<>(streamCut.keySet());
    ImmutableMap.Builder<StreamSegmentRecord, Integer> resultSet = ImmutableMap.builder();
    // Scan epochs from the highest down to the lowest; each stream-cut segment is
    // attributed to the highest epoch in which it appears, then removed from the search set.
    for (int i = epochHigh - epochLow; i >= 0; i--) {
        if (toFind.isEmpty()) {
            break;
        }
        EpochRecord epochRecord = epochs.get(i);
        Set<Long> epochSegments = epochRecord.getSegmentIds();
        List<Long> found = toFind.stream().filter(epochSegments::contains).collect(Collectors.toList());
        resultSet.putAll(found.stream().collect(Collectors.toMap(
                x -> epochRecord.getSegments().stream().filter(z -> z.segmentId() == x).findFirst().get(),
                x -> epochRecord.getEpoch())));
        toFind.removeAll(epochSegments);
    }
    return resultSet.build();
}
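For intuition, here is a minimal standalone sketch of the same scan over toy data. It models each epoch as a plain set of segment ids instead of an EpochRecord, and the class and method names are hypothetical; the real method additionally resolves each id to its StreamSegmentRecord.

import java.util.*;

// Hypothetical, simplified model of computeStreamCutSpanInternal: scan epochs from
// newest to oldest and record, for each segment in the stream cut, the highest
// epoch that still contains it.
public class StreamCutSpanSketch {
    static Map<Long, Integer> computeSpan(Set<Long> streamCutSegments, List<Set<Long>> epochs) {
        List<Long> toFind = new ArrayList<>(streamCutSegments);
        Map<Long, Integer> span = new HashMap<>();
        for (int epoch = epochs.size() - 1; epoch >= 0 && !toFind.isEmpty(); epoch--) {
            Set<Long> epochSegments = epochs.get(epoch);
            Iterator<Long> it = toFind.iterator();
            while (it.hasNext()) {
                long segment = it.next();
                if (epochSegments.contains(segment)) {
                    span.put(segment, epoch); // first hit is the highest epoch, so it wins
                    it.remove();
                }
            }
        }
        return span;
    }

    public static void main(String[] args) {
        // Epoch 0 has segments 0 and 1; in epoch 1, segment 0 survives and 1 is split into 2 and 3.
        List<Set<Long>> epochs = List.of(Set.of(0L, 1L), Set.of(0L, 2L, 3L));
        // A stream cut spanning both epochs: segment 1 (from epoch 0) and segment 2 (from epoch 1).
        System.out.println(computeSpan(Set.of(1L, 2L), epochs)); // prints {1=0, 2=1}
    }
}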
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
The class PersistentStreamBase, method rollingTxnCreateDuplicateEpochs.
@Override
public CompletableFuture<Void> rollingTxnCreateDuplicateEpochs(Map<Long, Long> sealedTxnEpochSegments, long time,
                                                               VersionedMetadata<CommittingTransactionsRecord> record,
                                                               OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    Preconditions.checkArgument(record.getObject().isRollingTxnRecord());
    CommittingTransactionsRecord committingTxnRecord = record.getObject();
    return getActiveEpoch(true, context).thenCompose(activeEpochRecord ->
            getEpochRecord(committingTxnRecord.getEpoch(), context).thenCompose(transactionEpochRecord -> {
        if (activeEpochRecord.getEpoch() > committingTxnRecord.getCurrentEpoch()) {
            log.debug(context.getRequestId(), "Duplicate Epochs {} already created. Ignore.",
                    committingTxnRecord.getNewActiveEpoch());
            return CompletableFuture.completedFuture(null);
        }
        long timeStamp = Math.max(activeEpochRecord.getCreationTime() + 1, time);
        // Duplicate the transaction epoch's segments into the new txn epoch.
        ImmutableList.Builder<StreamSegmentRecord> duplicateTxnSegmentsBuilder = ImmutableList.builder();
        transactionEpochRecord.getSegments().forEach(x -> duplicateTxnSegmentsBuilder.add(newSegmentRecord(
                computeSegmentId(getSegmentNumber(x.segmentId()), committingTxnRecord.getNewTxnEpoch()),
                timeStamp, x.getKeyStart(), x.getKeyEnd())));
        // Duplicate the active epoch's segments into the new active epoch, one tick later.
        ImmutableList.Builder<StreamSegmentRecord> duplicateActiveSegmentsBuilder = ImmutableList.builder();
        activeEpochRecord.getSegments().forEach(x -> duplicateActiveSegmentsBuilder.add(newSegmentRecord(
                computeSegmentId(getSegmentNumber(x.segmentId()), committingTxnRecord.getNewActiveEpoch()),
                timeStamp + 1, x.getKeyStart(), x.getKeyEnd())));
        CompletableFuture<EpochRecord> txnEpochFuture = getSplitMergeCountsTillEpoch(activeEpochRecord, context)
                .thenCompose(txnSplitMergeCount -> {
                    ImmutableList<StreamSegmentRecord> duplicateTxnEpochSegments = duplicateTxnSegmentsBuilder.build();
                    EpochRecord duplicateTxnEpoch = new EpochRecord(committingTxnRecord.getNewTxnEpoch(),
                            transactionEpochRecord.getReferenceEpoch(), duplicateTxnEpochSegments, timeStamp,
                            getNewEpochSplitCount(txnSplitMergeCount.getKey(), activeEpochRecord.getSegments(), duplicateTxnEpochSegments),
                            getNewEpochMergeCount(txnSplitMergeCount.getValue(), activeEpochRecord.getSegments(), duplicateTxnEpochSegments));
                    return CompletableFuture.completedFuture(duplicateTxnEpoch);
                });
        CompletableFuture<EpochRecord> activeEpochFuture = txnEpochFuture.thenCompose(previousEpoch ->
                getSplitMergeCountsTillEpoch(previousEpoch, context).thenCompose(prevSplitMergeCounts -> {
                    ImmutableList<StreamSegmentRecord> activeEpochSegments = duplicateActiveSegmentsBuilder.build();
                    EpochRecord duplicateActiveEpoch = new EpochRecord(committingTxnRecord.getNewActiveEpoch(),
                            activeEpochRecord.getReferenceEpoch(), activeEpochSegments, timeStamp + 1,
                            getNewEpochSplitCount(prevSplitMergeCounts.getKey(), previousEpoch.getSegments(), activeEpochSegments),
                            getNewEpochMergeCount(prevSplitMergeCounts.getValue(), previousEpoch.getSegments(), activeEpochSegments));
                    return CompletableFuture.completedFuture(duplicateActiveEpoch);
                }));
        return CompletableFuture.allOf(txnEpochFuture, activeEpochFuture).thenCompose(v -> {
            EpochRecord duplicateTxnEpoch = txnEpochFuture.join();
            EpochRecord duplicateActiveEpoch = activeEpochFuture.join();
            HistoryTimeSeriesRecord timeSeriesRecordTxnEpoch = new HistoryTimeSeriesRecord(duplicateTxnEpoch.getEpoch(),
                    duplicateTxnEpoch.getReferenceEpoch(), ImmutableList.of(), ImmutableList.of(), timeStamp);
            HistoryTimeSeriesRecord timeSeriesRecordActiveEpoch = new HistoryTimeSeriesRecord(duplicateActiveEpoch.getEpoch(),
                    duplicateActiveEpoch.getReferenceEpoch(), ImmutableList.of(), ImmutableList.of(), timeStamp + 1);
            // Persist both epoch records, their history time series entries, and the sealed-segment epoch maps.
            return createEpochRecord(duplicateTxnEpoch, context)
                    .thenCompose(x -> updateHistoryTimeSeries(timeSeriesRecordTxnEpoch, context))
                    .thenCompose(x -> createEpochRecord(duplicateActiveEpoch, context))
                    .thenCompose(x -> updateHistoryTimeSeries(timeSeriesRecordActiveEpoch, context))
                    .thenCompose(x -> createSegmentSealedEpochRecords(activeEpochRecord.getSegments().stream()
                            .map(StreamSegmentRecord::segmentId).collect(Collectors.toList()), duplicateTxnEpoch.getEpoch(), context))
                    .thenCompose(x -> createSegmentSealedEpochRecords(duplicateTxnEpoch.getSegments().stream()
                            .map(StreamSegmentRecord::segmentId).collect(Collectors.toList()), duplicateActiveEpoch.getEpoch(), context));
        }).thenCompose(r -> updateSealedSegmentSizes(sealedTxnEpochSegments, context));
    }));
}
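The duplication above hinges on computeSegmentId and getSegmentNumber. Below is a minimal sketch of the assumed id arithmetic: in Pravega, a segment id packs the creation epoch into the high 32 bits and the segment number into the low 32 bits (this mirrors the convention of Pravega's segment-id helpers; the sketch reimplements it locally, and the class name is hypothetical).

// Minimal sketch of how a segment is "duplicated" into a new epoch: keep the
// segment number, swap in the new epoch. Assumes the epoch-in-high-bits layout.
public class DuplicateSegmentIdSketch {
    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
    }

    static int getSegmentNumber(long segmentId) {
        return (int) segmentId; // low 32 bits hold the segment number
    }

    public static void main(String[] args) {
        long original = computeSegmentId(3, 1);  // segment number 3, created in epoch 1
        int newTxnEpoch = 5;                     // hypothetical duplicate txn epoch
        long duplicate = computeSegmentId(getSegmentNumber(original), newTxnEpoch);
        System.out.println(original + " -> " + duplicate); // same number, new epoch
    }
}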
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
The class PersistentStreamBase, method compareStreamCuts.
@Override
public CompletableFuture<StreamCutComparison> compareStreamCuts(Map<Long, Long> streamcut1, Map<Long, Long> streamcut2,
                                                                OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    LongSummaryStatistics stats1 = streamcut1.keySet().stream().collect(Collectors.summarizingLong(Long::longValue));
    LongSummaryStatistics stats2 = streamcut2.keySet().stream().collect(Collectors.summarizingLong(Long::longValue));
    // If every segment id in one streamcut is strictly less than every segment id in the other,
    // we do not need to compare the streamcuts at all and can simply return the response.
    if (stats1.getMax() < stats2.getMin()) {
        // all segments in streamcut1 precede those in streamcut2
        return CompletableFuture.completedFuture(StreamCutComparison.Before);
    } else if (stats2.getMax() < stats1.getMin()) {
        // all segments in streamcut2 precede those in streamcut1
        return CompletableFuture.completedFuture(StreamCutComparison.EqualOrAfter);
    }
    CompletableFuture<ImmutableMap<StreamSegmentRecord, Integer>> span1Future = computeStreamCutSpan(streamcut1, context);
    CompletableFuture<ImmutableMap<StreamSegmentRecord, Integer>> span2Future = computeStreamCutSpan(streamcut2, context);
    return CompletableFuture.allOf(span1Future, span2Future).thenApply(v -> {
        ImmutableMap<StreamSegmentRecord, Integer> span1 = span1Future.join();
        ImmutableMap<StreamSegmentRecord, Integer> span2 = span2Future.join();
        // Loop over all segments in streamcut1 and compare them with segments in streamcut2.
        // If all segments in streamcut1 are greater than or equal to all overlapping segments
        // in streamcut2, then streamcut1 is equal to or after streamcut2.
        boolean foundGt = false;
        boolean foundLt = false;
        for (Map.Entry<StreamSegmentRecord, Integer> e1 : span1.entrySet()) {
            for (Map.Entry<StreamSegmentRecord, Integer> e2 : span2.entrySet()) {
                int comparison;
                if (e2.getKey().segmentId() == e1.getKey().segmentId()) {
                    // same segment: compare offsets
                    comparison = Long.compare(streamcut1.get(e1.getKey().segmentId()), streamcut2.get(e2.getKey().segmentId()));
                } else if (e2.getKey().overlaps(e1.getKey())) {
                    // overlapping key ranges: compare segment ids
                    comparison = Long.compare(e1.getKey().segmentId(), e2.getKey().segmentId());
                } else {
                    continue;
                }
                foundGt = foundGt || comparison > 0;
                foundLt = foundLt || comparison < 0;
            }
        }
        if (foundGt) {
            // some segments are ahead; if any are also behind, the streamcuts overlap
            return foundLt ? StreamCutComparison.Overlaps : StreamCutComparison.EqualOrAfter;
        } else {
            // nothing is ahead: either strictly behind, or every comparison was equal
            return foundLt ? StreamCutComparison.Before : StreamCutComparison.EqualOrAfter;
        }
    });
}
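To make the return values concrete, here is a toy, self-contained illustration of the comparison semantics for the simple case where both stream cuts cover exactly the same segments, so only offsets matter. The class and enum are local stand-ins, not the controller's types, and the overlapping-span cases handled above are deliberately omitted.

import java.util.Map;

// Toy model: compare two stream cuts over identical segment sets by offset only.
public class StreamCutCompareSketch {
    enum Comparison { Before, EqualOrAfter, Overlaps }

    static Comparison compare(Map<Long, Long> cut1, Map<Long, Long> cut2) {
        boolean foundGt = false, foundLt = false;
        for (Map.Entry<Long, Long> e : cut1.entrySet()) {
            int c = Long.compare(e.getValue(), cut2.get(e.getKey()));
            foundGt |= c > 0;
            foundLt |= c < 0;
        }
        if (foundGt && foundLt) return Comparison.Overlaps;   // ahead on some segments, behind on others
        if (foundLt) return Comparison.Before;                // behind everywhere it differs
        return Comparison.EqualOrAfter;                       // ahead everywhere it differs, or identical
    }

    public static void main(String[] args) {
        System.out.println(compare(Map.of(0L, 10L, 1L, 20L), Map.of(0L, 10L, 1L, 30L))); // Before
        System.out.println(compare(Map.of(0L, 15L, 1L, 10L), Map.of(0L, 10L, 1L, 30L))); // Overlaps
        System.out.println(compare(Map.of(0L, 10L, 1L, 30L), Map.of(0L, 10L, 1L, 30L))); // EqualOrAfter
    }
}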
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
The class PravegaTablesScaleRequestHandlerTest, method testEpochMigration.
@Test(timeout = 30000)
public void testEpochMigration() throws ExecutionException, InterruptedException {
    final String scope = "scopeEpoch";
    streamStore.createScope(scope, null, executor).get();
    final String testStream = "streamEpoch";
    final String epoch0Key = "epochRecord-0";
    long creationTime = System.currentTimeMillis();
    StreamSegmentRecord segRecord = new StreamSegmentRecord(0, 0, creationTime, 0.0, 1.0);
    EpochRecord firstEpochInOldFormat = new EpochRecord(0, 0, ImmutableList.of(segRecord), creationTime,
            EpochRecord.DEFAULT_COUNT_VALUE, EpochRecord.DEFAULT_COUNT_VALUE);
    VersionedMetadata<EpochRecord> expectedEpochRecord = new VersionedMetadata<>(firstEpochInOldFormat, new Version.IntVersion(0));
    // stub the store to serve epoch 0 in the old record format
    doReturn(CompletableFuture.completedFuture(expectedEpochRecord)).when(storeHelper)
            .getCachedOrLoad(anyString(), eq(epoch0Key), any(), anyLong(), anyLong());
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore.createStream(scope, testStream, config, System.currentTimeMillis(), null, executor).join();
    streamStore.setState(scope, testStream, State.ACTIVE, null, executor).join();
    assertEquals(firstEpochInOldFormat, streamStore.getEpoch(scope, testStream, 0, null, executor).join());
    ArrayList<Map.Entry<Double, Double>> newRange = new ArrayList<>();
    newRange.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    // start with manual scale
    ScaleOpEvent event = new ScaleOpEvent(scope, testStream, Lists.newArrayList(0L), newRange, true,
            System.currentTimeMillis(), System.currentTimeMillis());
    streamStore.submitScale(scope, testStream, Lists.newArrayList(0L), new ArrayList<>(newRange),
            System.currentTimeMillis(), null, null, executor).join();
    // perform scaling
    scaleRequestHandler.execute(event).join();
    assertEquals(State.ACTIVE, streamStore.getState(scope, testStream, true, null, executor).join());
    assertEquals(1, streamStore.getActiveEpoch(scope, testStream, null, true, executor).join().getEpoch());
}
Use of io.pravega.controller.store.stream.records.StreamSegmentRecord in project pravega by pravega.
The class ScaleRequestHandlerTest, method testScaleRequest.
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testScaleRequest() throws ExecutionException, InterruptedException {
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler,
            null, null, null, null, null, null, null, streamStore, null, executor);
    // send a scale-up event with number of splits = 1
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    AutoScaleEvent scaleUpEvent = new AutoScaleEvent(scope, stream, 2, AutoScaleEvent.UP, System.currentTimeMillis(),
            1, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleUpEvent, () -> false)));
    // verify that one scaleOp event is written into the stream
    assertEquals(1, writer.queue.size());
    ControllerEvent event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleOpEvent = (ScaleOpEvent) event;
    double start = 2.0 / 3.0;
    double end = 1.0;
    double middle = (start + end) / 2;
    assertEquals(2, scaleOpEvent.getNewRanges().size());
    double delta = 0.0000000000001;
    assertEquals(start, scaleOpEvent.getNewRanges().get(0).getKey(), delta);
    assertEquals(middle, scaleOpEvent.getNewRanges().get(0).getValue(), delta);
    assertEquals(middle, scaleOpEvent.getNewRanges().get(1).getKey(), delta);
    assertEquals(end, scaleOpEvent.getNewRanges().get(1).getValue(), delta);
    assertEquals(1, scaleOpEvent.getSegmentsToSeal().size());
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(2L));
    assertTrue(Futures.await(multiplexer.process(scaleOpEvent, () -> false)));
    // verify that the event is processed successfully
    List<StreamSegmentRecord> activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == 2L));
    // verify that two splits are created even though we sent 1 as numOfSplits in the AutoScaleEvent
    long three = computeSegmentId(3, 1);
    long four = computeSegmentId(4, 1);
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.size() == 4);
    // process the first scale-down event; it should only mark the segment as cold
    AutoScaleEvent scaleDownEvent = new AutoScaleEvent(scope, stream, four, AutoScaleEvent.DOWN, System.currentTimeMillis(),
            0, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleDownEvent, () -> false)));
    assertTrue(writer.queue.isEmpty());
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.size() == 4);
    assertTrue(streamStore.isCold(scope, stream, four, null, executor).join());
    AutoScaleEvent scaleDownEvent2 = new AutoScaleEvent(scope, stream, three, AutoScaleEvent.DOWN, System.currentTimeMillis(),
            0, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleDownEvent2, () -> false)));
    assertTrue(streamStore.isCold(scope, stream, three, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    scaleOpEvent = (ScaleOpEvent) event;
    assertEquals(1, scaleOpEvent.getNewRanges().size());
    assertEquals(start, scaleOpEvent.getNewRanges().get(0).getKey(), delta);
    assertEquals(end, scaleOpEvent.getNewRanges().get(0).getValue(), delta);
    assertEquals(2, scaleOpEvent.getSegmentsToSeal().size());
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(three));
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(four));
    // process the scale-down event
    assertTrue(Futures.await(multiplexer.process(scaleOpEvent, () -> false)));
    long five = computeSegmentId(5, 2);
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == five));
    assertTrue(activeSegments.size() == 3);
    // Trigger a non-retryable failure so the test does not wait out the retry policy.
    // A retryable failure can keep retrying for a few seconds, and changes to retry durations
    // or attempt counts in the retry helper would otherwise affect this test's running time.
    // Hence we send an incorrect segmentsToSeal list, which fails immediately.
    assertFalse(Futures.await(multiplexer.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(five),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 1.0)), false, System.currentTimeMillis(),
            System.currentTimeMillis()), () -> false)));
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == five));
    assertTrue(activeSegments.size() == 3);
    assertFalse(Futures.await(multiplexer.process(new AbortEvent(scope, stream, 0, UUID.randomUUID(), 11L), () -> false)));
}