Usage of io.pravega.controller.store.stream.Segment in the pravega/pravega project: class TableHelper, method getSizeTillStreamCut.
/**
 * Computes the absolute size, in bytes, of the stream from its origin up to the supplied stream cut.
 * The result deliberately ignores truncation: it accounts for all data ever written before the cut,
 * even if part of that data has since been truncated away.
 *
 * @param indexTable           index table for the stream
 * @param historyTable         history table for the stream
 * @param segmentTable         segment table for the stream
 * @param streamCut            stream cut (segment number to offset) to compute the size up to
 * @param sealedSegmentsRecord record holding the sizes of all sealed segments of the stream
 * @return size (in bytes) of the stream up to the given stream cut
 */
public static long getSizeTillStreamCut(final byte[] indexTable, final byte[] historyTable, final byte[] segmentTable, final Map<Integer, Long> streamCut, final SealedSegmentsRecord sealedSegmentsRecord) {
    Preconditions.checkNotNull(streamCut);
    Preconditions.checkNotNull(indexTable);
    Preconditions.checkNotNull(historyTable);
    Preconditions.checkNotNull(sealedSegmentsRecord);
    Preconditions.checkNotNull(segmentTable);
    Preconditions.checkArgument(!streamCut.isEmpty());

    final Map<Integer, Integer> epochCutMap = computeEpochCutMap(historyTable, indexTable, segmentTable, streamCut);
    final Map<Segment, Integer> cutMapSegments = transform(segmentTable, epochCutMap);
    final Map<Integer, Long> sealedSegmentSizeMap = sealedSegmentsRecord.getSealedSegmentsSizeMap();

    // Begin with the offsets captured in the stream cut itself: each offset is the amount of
    // data consumed within that cut segment.
    long size = 0L;
    for (Long offset : streamCut.values()) {
        size += offset;
    }

    final int highestEpoch = epochCutMap.values().stream().max(Comparator.naturalOrder()).orElse(Integer.MIN_VALUE);

    // Walk the history from epoch 0 up to the highest epoch referenced by the cut, adding the
    // full (sealed) size of every segment that lies strictly before the cut.
    Optional<HistoryRecord> historyRecordOpt = HistoryRecord.readRecord(historyTable, 0, true);
    while (historyRecordOpt.isPresent() && historyRecordOpt.get().getEpoch() <= highestEpoch) {
        final HistoryRecord historyRecord = historyRecordOpt.get();
        final int epoch = historyRecord.getEpoch();
        for (Integer epochSegmentNumber : historyRecord.getSegments()) {
            final Segment epochSegment = getSegment(epochSegmentNumber, segmentTable);
            // A segment counts in full only when it precedes the cut entirely: it is not itself a
            // cut segment, and no cut segment overlapping its key range originates from this epoch
            // or an earlier one.
            final boolean beforeCut = cutMapSegments.entrySet().stream().noneMatch(cutSegment -> cutSegment.getKey().getNumber() == epochSegment.getNumber() || (cutSegment.getKey().overlaps(epochSegment) && cutSegment.getValue() <= epoch));
            if (beforeCut) {
                // NOTE(review): assumes every pre-cut segment is present in the sealed-size map —
                // a missing entry would NPE here, matching the original's behavior on such input.
                size += sealedSegmentSizeMap.get(epochSegmentNumber);
            }
        }
        historyRecordOpt = HistoryRecord.fetchNext(historyRecord, historyTable, true);
    }
    return size;
}
Usage of io.pravega.controller.store.stream.Segment in the pravega/pravega project: class TableHelper, method computeToDelete.
/**
 * Determines which segments are eligible for deletion given a truncation cut.
 * A segment qualifies when it has not already been deleted and it lies entirely before the cut
 * (it is not a cut segment, and no overlapping cut segment comes from its epoch or earlier).
 *
 * @param epochCutMap     map from each cut segment to the epoch it belongs to
 * @param historyTable    history table for the stream
 * @param segmentTable    segment table for the stream
 * @param deletedSegments numbers of segments that have already been deleted
 * @return numbers of segments that can now be deleted
 */
private static Set<Integer> computeToDelete(Map<Segment, Integer> epochCutMap, byte[] historyTable, byte[] segmentTable, Set<Integer> deletedSegments) {
    final Set<Integer> toDelete = new HashSet<>();
    final int highestEpoch = epochCutMap.values().stream().max(Comparator.naturalOrder()).orElse(Integer.MIN_VALUE);

    // Walk the history from epoch 0 up to the highest epoch referenced by the cut.
    Optional<HistoryRecord> historyRecordOpt = HistoryRecord.readRecord(historyTable, 0, true);
    while (historyRecordOpt.isPresent() && historyRecordOpt.get().getEpoch() <= highestEpoch) {
        final HistoryRecord historyRecord = historyRecordOpt.get();
        final int epoch = historyRecord.getEpoch();
        for (Integer epochSegmentNumber : historyRecord.getSegments()) {
            if (deletedSegments.contains(epochSegmentNumber)) {
                // Already gone — nothing to do for this segment.
                continue;
            }
            final Segment epochSegment = getSegment(epochSegmentNumber, segmentTable);
            // Delete iff the segment strictly precedes the cut: no cut segment shares its number,
            // and no cut segment overlapping its key range originates from this epoch or earlier.
            final boolean precedesCut = epochCutMap.entrySet().stream().noneMatch(cutSegment -> cutSegment.getKey().getNumber() == epochSegment.getNumber() || (cutSegment.getKey().overlaps(epochSegment) && cutSegment.getValue() <= epoch));
            if (precedesCut) {
                toDelete.add(epochSegmentNumber);
            }
        }
        historyRecordOpt = HistoryRecord.fetchNext(historyRecord, historyTable, true);
    }
    return toDelete;
}
Usage of io.pravega.controller.store.stream.Segment in the pravega/pravega project: class ControllerService, method getSegmentsImmediatelyFollowing.
/**
 * Fetches the segments that immediately succeed the given segment, together with the predecessor
 * numbers the store reports for each successor.
 *
 * @param segment the segment whose successors are requested
 * @return a future map from each successor's {@link SegmentRange} to its list of predecessor numbers
 */
public CompletableFuture<Map<SegmentRange, List<Integer>>> getSegmentsImmediatelyFollowing(SegmentId segment) {
    Preconditions.checkNotNull(segment, "segment");
    // Protobuf getters are pure, so the scope/stream can be read once up front.
    final String scope = segment.getStreamInfo().getScope();
    final String stream = segment.getStreamInfo().getStream();
    final OperationContext context = streamStore.createContext(scope, stream);
    return streamStore.getSuccessors(scope, stream, segment.getSegmentNumber(), context, executor)
            .thenComposeAsync(successors -> {
                // For every successor segment number, kick off a fetch of its full record and map
                // it to a SegmentRange; the value is the list of predecessors reported by the store.
                final Map<CompletableFuture<SegmentRange>, List<Integer>> rangeFutures = successors.entrySet().stream()
                        .collect(Collectors.toMap(
                                entry -> streamStore.getSegment(scope, stream, entry.getKey(), context, executor)
                                        .thenApply(seg -> ModelHelper.createSegmentRange(scope, stream, seg.getNumber(), seg.getKeyStart(), seg.getKeyEnd())),
                                Map.Entry::getValue));
                // Resolve all the range futures and surface the completed map.
                return Futures.keysAllOfWithResults(rangeFutures);
            }, executor);
}
Aggregations