Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
The class ControllerMetadataJsonSerializerTest, method testEpochRecord:
@Test
public void testEpochRecord() {
    List<StreamSegmentRecord> list = Lists.newArrayList(
            StreamSegmentRecord.newSegmentRecord(1, 0, 10L, 0.0, 1.0));
    EpochRecord record = new EpochRecord(10, 0, ImmutableList.copyOf(list), 10L, 0L, 0L);
    testRecordSerialization(record, EpochRecord.class);
}
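The helper testRecordSerialization is not shown in this excerpt. A minimal sketch of a JSON round-trip check of this kind, assuming a Jackson ObjectMapper and an equals-based comparison (the actual ControllerMetadataJsonSerializer used by Pravega may work differently), could look like this:

// Hypothetical round-trip helper, NOT Pravega's actual testRecordSerialization:
// serialize the record to JSON, parse it back, and assert the result is equal.
private <T> void roundTripJson(T record, Class<T> clazz) throws Exception {
    com.fasterxml.jackson.databind.ObjectMapper mapper = new com.fasterxml.jackson.databind.ObjectMapper();
    String json = mapper.writeValueAsString(record);   // record -> JSON string
    T parsed = mapper.readValue(json, clazz);           // JSON string -> record
    org.junit.Assert.assertEquals(record, parsed);      // the round trip must preserve equality
}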
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
The class PersistentStreamBase, method scaleCreateNewEpoch:
@Override
public CompletableFuture<VersionedMetadata<EpochTransitionRecord>> scaleCreateNewEpoch(
        VersionedMetadata<EpochTransitionRecord> versionedMetadata, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    return getActiveEpochRecord(true, context).thenCompose(currentEpoch -> {
        // Only perform an idempotent update. If the update has already completed, do nothing.
        if (currentEpoch.getEpoch() < versionedMetadata.getObject().getNewEpoch()) {
            EpochTransitionRecord epochTransition = versionedMetadata.getObject();
            // Creation time of the new epoch: strictly greater than the current epoch's creation time.
            long time = Math.max(epochTransition.getTime(), currentEpoch.getCreationTime() + 1);
            // Segments created by this scale.
            ImmutableList.Builder<StreamSegmentRecord> newSegmentsBuilder = ImmutableList.builder();
            epochTransition.getNewSegmentsWithRange().forEach((key, value) ->
                    newSegmentsBuilder.add(newSegmentRecord(key, time, value.getKey(), value.getValue())));
            // Segments sealed by this scale.
            ImmutableList.Builder<StreamSegmentRecord> sealedSegmentsBuilder = ImmutableList.builder();
            epochTransition.getSegmentsToSeal().forEach(x -> sealedSegmentsBuilder.add(currentEpoch.getSegment(x)));
            // Overall segments in the new epoch: current segments minus sealed segments, plus new segments.
            ImmutableList.Builder<StreamSegmentRecord> builder = ImmutableList.builder();
            currentEpoch.getSegments().forEach(x -> {
                if (!epochTransition.getSegmentsToSeal().contains(x.segmentId())) {
                    builder.add(x);
                }
            });
            ImmutableList<StreamSegmentRecord> newSegments = newSegmentsBuilder.build();
            builder.addAll(newSegments);
            ImmutableList<StreamSegmentRecord> newEpochSegments = builder.build();
            // Create the new epoch record and the corresponding history time series record.
            return getSplitMergeCountsTillEpoch(currentEpoch, context).thenCompose(cumulativeSplitMergeCount -> {
                EpochRecord epochRecord = new EpochRecord(epochTransition.getNewEpoch(), epochTransition.getNewEpoch(),
                        newEpochSegments, time,
                        getNewEpochSplitCount(cumulativeSplitMergeCount.getKey(), currentEpoch.getSegments(), newEpochSegments),
                        getNewEpochMergeCount(cumulativeSplitMergeCount.getValue(), currentEpoch.getSegments(), newEpochSegments));
                HistoryTimeSeriesRecord timeSeriesRecord = new HistoryTimeSeriesRecord(epochTransition.getNewEpoch(),
                        epochTransition.getNewEpoch(), sealedSegmentsBuilder.build(), newSegments,
                        epochRecord.getCreationTime());
                return createEpochRecord(epochRecord, context)
                        .thenCompose(x -> updateHistoryTimeSeries(timeSeriesRecord, context))
                        .thenCompose(x -> createSegmentSealedEpochRecords(epochTransition.getSegmentsToSeal(),
                                epochTransition.getNewEpoch(), context))
                        .thenApply(x -> versionedMetadata);
            });
        } else {
            return CompletableFuture.completedFuture(versionedMetadata);
        }
    });
}
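The segment bookkeeping above reduces to a simple set rule: the new epoch keeps every current segment that is not being sealed and adds the segments created by the scale. A minimal sketch of that rule with plain collections and bare long segment ids (an illustration, not Pravega's record types):

// Illustrative only: membership rule for the new epoch, using bare segment ids.
static java.util.Set<Long> nextEpochSegments(java.util.Set<Long> currentSegments,
                                             java.util.Set<Long> segmentsToSeal,
                                             java.util.Set<Long> newSegments) {
    java.util.Set<Long> result = new java.util.HashSet<>(currentSegments);
    result.removeAll(segmentsToSeal);  // drop every segment sealed by this scale
    result.addAll(newSegments);        // add every segment created by this scale
    return result;
}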
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
The class PersistentStreamBase, method getSuccessorsWithPredecessors:
@Override
public CompletableFuture<Map<StreamSegmentRecord, List<Long>>> getSuccessorsWithPredecessors(
        final long segmentId, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    // Fetch the epoch in which this segment was sealed.
    return getSegmentSealedEpoch(segmentId, context).thenCompose(sealedEpoch -> {
        if (sealedEpoch < 0) {
            return getActiveEpoch(true, context).thenApply(activeSegments -> Collections.emptyMap());
        }
        // The sealed-epoch record exists; fetch the epoch record for that sealing epoch.
        // Note: the sealed-epoch record is created even before the segment is actually sealed,
        // so if a client is requesting successors, we should find it.
        CompletableFuture<EpochRecord> sealedEpochFuture = getEpochRecord(sealedEpoch, context);
        // Fetch the previous epoch as well.
        CompletableFuture<EpochRecord> previousEpochFuture = getEpochRecord(sealedEpoch - 1, context);
        return CompletableFuture.allOf(sealedEpochFuture, previousEpochFuture).thenApply(x -> {
            EpochRecord sealedEpochRecord = sealedEpochFuture.join();
            EpochRecord previousEpochRecord = previousEpochFuture.join();
            Optional<StreamSegmentRecord> segmentOpt = previousEpochRecord.getSegments().stream()
                    .filter(r -> r.segmentId() == segmentId).findAny();
            assert segmentOpt.isPresent();
            StreamSegmentRecord segment = segmentOpt.get();
            List<StreamSegmentRecord> successors = sealedEpochRecord.getSegments().stream()
                    .filter(r -> r.overlaps(segment)).collect(Collectors.toList());
            return successors.stream().collect(Collectors.toMap(record -> record,
                    z -> previousEpochRecord.getSegments().stream()
                            .filter(predecessor -> predecessor.overlaps(z))
                            .map(StreamSegmentRecord::segmentId)
                            .collect(Collectors.toList())));
        });
    });
}
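Successor and predecessor matching hinges on key-range overlap between segments. A minimal sketch of that predicate, assuming half-open [keyStart, keyEnd) ranges over the routing-key space (StreamSegmentRecord.overlaps may be implemented differently in Pravega):

// Illustrative range-overlap test: two key ranges intersect exactly when
// each one starts before the other one ends.
static boolean rangesOverlap(double startA, double endA, double startB, double endB) {
    return startA < endB && startB < endA;
}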
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
The class PersistentStreamBase, method isStreamCutValidInternal:
private boolean isStreamCutValidInternal(Map<Long, Long> streamCut, int epochLow, List<EpochRecord> epochs) {
    Set<StreamSegmentRecord> segmentsInStreamCut = new HashSet<>();
    Set<StreamSegmentRecord> futureSegment = new HashSet<>();
    boolean isValid = true;
    // For each segment in the stream cut, look up its epoch and its segment record.
    streamCut.forEach((key, value) -> {
        int epoch = NameUtils.getEpoch(key);
        int index = epoch - epochLow;
        EpochRecord epochRecord = epochs.get(index);
        StreamSegmentRecord segmentRecord = epochRecord.getSegment(key);
        if (value < 0) {
            futureSegment.add(segmentRecord);
        } else {
            segmentsInStreamCut.add(segmentRecord);
        }
    });
    isValid = futureSegment.stream().allMatch(x -> segmentsInStreamCut.stream()
            .filter(y -> y.overlaps(x)).allMatch(y -> y.segmentId() < x.segmentId()));
    if (isValid) {
        List<StreamSegmentRecord> sorted = segmentsInStreamCut.stream()
                .sorted(Comparator.comparingDouble(StreamSegmentRecord::getKeyStart))
                .collect(Collectors.toList());
        // All future segments should have a predecessor, and all missing ranges should be covered by a future segment.
        Map<Double, Double> missingRanges = new HashMap<>();
        StreamSegmentRecord previous = sorted.get(0);
        BiFunction<Double, Double, Boolean> validate = (start, end) ->
                futureSegment.stream().anyMatch(x -> x.overlaps(start, end));
        if (previous.getKeyStart() > 0.0) {
            double start = 0.0;
            double end = previous.getKeyStart();
            missingRanges.put(start, end);
            // The missing range should be covered by a future segment.
            isValid = validate.apply(start, end);
        }
        for (int i = 1; i < sorted.size(); i++) {
            StreamSegmentRecord next = sorted.get(i);
            if (previous.getKeyEnd() < next.getKeyStart()) {
                double start = previous.getKeyEnd();
                double end = next.getKeyStart();
                missingRanges.put(start, end);
                // The missing range should be covered by a future segment.
                isValid = validate.apply(start, end);
                if (!isValid) {
                    break;
                }
            } else if (previous.getKeyEnd() > next.getKeyStart()) {
                isValid = false;
                break;
            }
            previous = next;
        }
        if (previous.getKeyEnd() < 1.0) {
            double start = previous.getKeyEnd();
            double end = 1.0;
            missingRanges.put(start, end);
            isValid = validate.apply(start, end);
        }
        if (isValid) {
            List<StreamSegmentRecord> toCheck = new ArrayList<>();
            Set<StreamSegmentRecord> fullyReadFrom = new HashSet<>();
            // Now traverse the stream for the missing ranges and verify that the future segments
            // can be reached in a logically consistent fashion.
            missingRanges.entrySet().forEach(x -> toCheck.addAll(findSegmentsForMissingRange(epochs.get(0), x)));
            while (!toCheck.isEmpty()) {
                StreamSegmentRecord segmentRecord = toCheck.get(0);
                if (!(fullyReadFrom.contains(segmentRecord) || segmentsInStreamCut.contains(segmentRecord)
                        || futureSegment.contains(segmentRecord))) {
                    for (StreamSegmentRecord s : segmentsInStreamCut) {
                        if (s.overlaps(segmentRecord)) {
                            if (s.segmentId() < segmentRecord.segmentId()) {
                                // If the segment record has a predecessor in the stream cut, it should have been
                                // declared as a future segment.
                                if (!futureSegment.contains(segmentRecord)) {
                                    return false;
                                } else {
                                    // The segment record is a declared future segment, so treat it as fully read.
                                    fullyReadFrom.add(segmentRecord);
                                }
                            } else {
                                // If the segment is a predecessor of another segment in the stream cut,
                                // then it has to be fully read.
                                fullyReadFrom.add(segmentRecord);
                                // Find successors of segmentRecord and add them to the toCheck list.
                                int segmentEpoch = NameUtils.getEpoch(segmentRecord.segmentId());
                                int index = segmentEpoch - epochLow;
                                for (int i = index; i < epochs.size(); i++) {
                                    if (!epochs.get(i).containsSegment(segmentRecord.segmentId())) {
                                        epochs.get(i).getSegments().forEach(x -> {
                                            if (x.overlaps(segmentRecord)) {
                                                toCheck.add(x);
                                            }
                                        });
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
                toCheck.remove(segmentRecord);
            }
        }
    }
    return isValid;
}
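findSegmentsForMissingRange is not included in this excerpt. Read from its call site above, it plausibly collects the segments of a given epoch whose key range intersects the missing range; a hedged sketch under that assumption (not necessarily Pravega's actual implementation):

// Assumed behavior, inferred from the call site: return the segments of the epoch
// that overlap the missing key range (range is an entry of start -> end).
private List<StreamSegmentRecord> findSegmentsForMissingRange(EpochRecord epochRecord,
                                                              Map.Entry<Double, Double> range) {
    return epochRecord.getSegments().stream()
            .filter(x -> x.overlaps(range.getKey(), range.getValue()))
            .collect(Collectors.toList());
}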
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
The class PersistentStreamBase, method startScale:
@Override
public CompletableFuture<VersionedMetadata<EpochTransitionRecord>> startScale(boolean isManualScale,
        VersionedMetadata<EpochTransitionRecord> record, VersionedMetadata<State> state, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    Preconditions.checkArgument(state.getObject().equals(State.SCALING));
    return getCurrentEpochRecordData(true, context).thenCompose(currentEpoch -> {
        EpochRecord currentEpochRecord = currentEpoch.getObject();
        if (isManualScale) {
            return migrateManualScaleToNewEpoch(record, state, currentEpochRecord, context);
        } else {
            // If the epoch transition is inconsistent with the current epoch, discard the
            // epoch transition record and reset the state to active.
            return discardInconsistentEpochTransition(record, state, currentEpochRecord, context);
        }
    });
}
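discardInconsistentEpochTransition is not shown here. One plausible notion of consistency, sketched below purely for illustration (names and logic are assumptions, not Pravega's actual check), is that every segment the transition wants to seal must still be present in the current epoch:

// Hypothetical consistency predicate between an epoch transition and the current epoch.
static boolean isTransitionConsistent(java.util.Set<Long> segmentsToSeal,
                                      java.util.Set<Long> currentEpochSegmentIds) {
    // Sealing a segment that no longer exists in the current epoch would make the transition stale.
    return currentEpochSegmentIds.containsAll(segmentsToSeal);
}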