Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class DeleteScopeTask, the method deleteScopeContent:
public CompletableFuture<Void> deleteScopeContent(String scopeName, OperationContext context, long requestId) {
    Map<String, String> readerGroupMap = new HashMap<>();
    Iterator<Stream> iterator = listStreams(scopeName, context).asIterator();
    // Seal and delete streams and add entry to RGList
    while (iterator.hasNext()) {
        Stream stream = iterator.next();
        Timer timer = new Timer();
        if (stream.getStreamName().startsWith(READER_GROUP_STREAM_PREFIX)) {
            readerGroupMap.put(stream.getStreamName().substring(READER_GROUP_STREAM_PREFIX.length()), stream.getStreamName());
        }
        log.debug("Processing seal and delete stream for Stream {}", stream);
        Futures.getThrowingException(Futures.exceptionallyExpecting(streamMetadataTasks.sealStream(scopeName, stream.getStreamName(), requestId), e -> {
            Throwable unwrap = Exceptions.unwrap(e);
            // ignore failures if the stream doesn't exist or we are unable to seal it.
            return unwrap instanceof InvalidStreamException || unwrap instanceof ControllerFailureException;
        }, Controller.UpdateStreamStatus.Status.STREAM_NOT_FOUND).thenCompose(sealed -> {
            ControllerService.reportSealStreamMetrics(scopeName, stream.getStreamName(), sealed, timer.getElapsed());
            return CompletableFuture.completedFuture(null);
        }).thenCompose(x -> streamMetadataTasks.deleteStream(stream.getScope(), stream.getStreamName(), requestId).thenCompose(status -> {
            ControllerService.reportDeleteStreamMetrics(scopeName, stream.getStreamName(), status, timer.getElapsed());
            return CompletableFuture.completedFuture(null);
        })));
    }
    // Delete ReaderGroups
    for (Map.Entry<String, String> rgMapEntry : readerGroupMap.entrySet()) {
        log.debug("Processing delete ReaderGroup for {}", rgMapEntry.getKey());
        Timer timer = new Timer();
        Futures.getThrowingException(streamMetadataTasks.getReaderGroupConfig(scopeName, rgMapEntry.getKey(), requestId)
                .thenCompose(conf -> streamMetadataTasks.deleteReaderGroup(scopeName, rgMapEntry.getKey(),
                        conf.getConfig().getReaderGroupId(), requestId).thenCompose(status -> {
                            ControllerService.reportDeleteReaderGroupMetrics(scopeName, rgMapEntry.getValue(), status, timer.getElapsed());
                            return CompletableFuture.completedFuture(null);
                        })));
    }
    // Delete KVTs
    Iterator<KeyValueTableInfo> kvtIterator = listKVTs(scopeName, requestId, context).asIterator();
    while (kvtIterator.hasNext()) {
        String kvt = kvtIterator.next().getKeyValueTableName();
        Timer timer = new Timer();
        log.debug("Processing delete kvt for {}", kvt);
        Futures.getThrowingException(kvtMetadataTasks.deleteKeyValueTable(scopeName, kvt, context.getRequestId()).thenCompose(status -> {
            ControllerService.reportDeleteKVTableMetrics(scopeName, kvt, status, timer.getElapsed());
            return CompletableFuture.completedFuture(null);
        }));
    }
    return streamMetadataStore.deleteScopeRecursive(scopeName, context, executor).thenApply(status -> {
        log.debug("Recursive Delete Scope returned with a status {}", status);
        return null;
    });
}
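The seal/delete calls above lean on Futures.exceptionallyExpecting to swallow anticipated failures (for instance, a stream that is already gone) and substitute a default status instead of failing the whole scope deletion. Below is a minimal, JDK-only sketch of that pattern; the helper name expectingOrDefault is hypothetical and is not Pravega's API (Pravega's real helper is io.pravega.common.concurrent.Futures.exceptionallyExpecting).

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Predicate;

public class ExpectingOrDefault {
    // If the future fails with an expected exception, complete with a default value instead.
    static <T> CompletableFuture<T> expectingOrDefault(CompletableFuture<T> future,
                                                       Predicate<Throwable> expected,
                                                       T defaultValue) {
        return future.exceptionally(e -> {
            Throwable unwrapped = (e instanceof CompletionException && e.getCause() != null) ? e.getCause() : e;
            if (expected.test(unwrapped)) {
                // Anticipated failure: substitute the default rather than propagating.
                return defaultValue;
            }
            throw new CompletionException(unwrapped);
        });
    }

    public static void main(String[] args) {
        CompletableFuture<String> failed = CompletableFuture.failedFuture(new IllegalStateException("stream not found"));
        String result = expectingOrDefault(failed, t -> t instanceof IllegalStateException, "STREAM_NOT_FOUND").join();
        System.out.println(result); // prints STREAM_NOT_FOUND
    }
}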
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class PeriodicWatermarking, the method watermark:
/**
 * This method computes and emits a new watermark for the given stream.
 * It collects all the known writers for the given stream and includes only writers that are active (i.e. that have
 * reported their marks recently). If all active writers have reported marks greater than the previously emitted
 * watermark, then a new watermark is computed and emitted. If not, the window for considering writers as active
 * is advanced.
 * @param stream stream for which the watermark should be computed.
 * @return a CompletableFuture which, when complete, indicates that another iteration of periodic watermark
 * computation has finished.
 */
public CompletableFuture<Void> watermark(Stream stream) {
    String scope = stream.getScope();
    String streamName = stream.getStreamName();
    long requestId = requestIdGenerator.get();
    String requestDescriptor = RequestTracker.buildRequestDescriptor("watermark", stream.getScope(), stream.getStreamName());
    requestTracker.trackRequest(requestDescriptor, requestId);
    OperationContext context = streamMetadataStore.createStreamContext(scope, streamName, requestId);
    if (scope.equals(NameUtils.INTERNAL_SCOPE_NAME)) {
        return CompletableFuture.completedFuture(null);
    }
    log.debug(requestId, "Periodic background processing for watermarking called for stream {}/{}", scope, streamName);
    CompletableFuture<Map<String, WriterMark>> allWriterMarks = Futures.exceptionallyExpecting(
            streamMetadataStore.getAllWriterMarks(scope, streamName, context, executor),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException, Collections.emptyMap());
    return allWriterMarks.thenCompose(writers -> {
        WatermarkClient watermarkClient = watermarkClientCache.getUnchecked(stream);
        try {
            watermarkClient.reinitialize();
        } catch (Exception e) {
            log.warn(requestId, "Watermarking client for stream {} threw exception {} during reinitialize.", stream, Exceptions.unwrap(e).getClass());
            if (Exceptions.unwrap(e) instanceof NoSuchSegmentException) {
                log.info(requestId, "Invalidating the watermark client in cache for stream {}.", stream);
                watermarkClientCache.invalidate(stream);
            }
            throw e;
        }
        return streamMetadataStore.getConfiguration(scope, streamName, context, executor)
                .thenCompose(config -> filterWritersAndComputeWatermark(scope, streamName, context, watermarkClient, writers, config));
    }).exceptionally(e -> {
        log.warn(requestId, "Exception thrown while trying to perform periodic watermark computation. Logging and ignoring.", e);
        return null;
    });
}
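The reinitialize/invalidate dance above follows a common pattern: cache one client per stream in a Guava LoadingCache and evict it when an error indicates the cached client has gone stale, so the next lookup recreates it. A minimal sketch under that assumption (the Client type here is a hypothetical stand-in for Pravega's WatermarkClient, not its real code):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class ClientCacheSketch {
    // Hypothetical client type standing in for Pravega's WatermarkClient.
    static class Client {
        final String stream;
        Client(String stream) { this.stream = stream; }
        void reinitialize() { /* may throw if the underlying segment disappeared */ }
    }

    private final LoadingCache<String, Client> cache = CacheBuilder.newBuilder()
            .build(new CacheLoader<String, Client>() {
                @Override
                public Client load(String stream) {
                    return new Client(stream);
                }
            });

    void use(String stream) {
        Client client = cache.getUnchecked(stream);
        try {
            client.reinitialize();
        } catch (RuntimeException e) {
            // Drop the stale client; the next getUnchecked recreates it via the loader.
            cache.invalidate(stream);
            throw e;
        }
    }
}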
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class PeriodicWatermarking, the method computeWatermark:
/**
 * This method takes the marks (time + position) of active writers and finds the greatest lower bound on time and
 * the least upper bound on positions, returning a watermark object composed of the two.
 * The least upper bound computed from positions may not form a consistent and complete stream cut.
 * So the positional upper bound is converted into a stream cut by including segments from higher epochs.
 * It is also possible that, in an effort to fill a missing range, we end up creating an upper bound composed of
 * segments from the highest epoch. In the next iteration, from new writer positions, we may be able to compute a
 * tighter upper bound. But since a watermark has to advance both position and time, we take the upper bound of
 * the previous stream cut and the new stream cut.
 *
 * @param scope scope
 * @param streamName stream name
 * @param context operation context
 * @param activeWriters marks for all active writers.
 * @param previousWatermark the previously emitted watermark.
 * @return a CompletableFuture which, when complete, will contain the watermark to be emitted.
 */
private CompletableFuture<Watermark> computeWatermark(String scope, String streamName, OperationContext context,
                                                      List<Map.Entry<String, WriterMark>> activeWriters, Watermark previousWatermark) {
    long requestId = context.getRequestId();
    Watermark.WatermarkBuilder builder = Watermark.builder();
    ConcurrentHashMap<SegmentWithRange, Long> upperBound = new ConcurrentHashMap<>();
    // We deliberately make two passes over the writers: the first finds the lowest time; the second converts writer
    // positions to StreamSegmentRecord objects by retrieving ranges from the store and then performs the
    // computation on those objects.
    LongSummaryStatistics summarized = activeWriters.stream().collect(Collectors.summarizingLong(x -> x.getValue().getTimestamp()));
    long lowerBoundOnTime = summarized.getMin();
    long upperBoundOnTime = summarized.getMax();
    if (lowerBoundOnTime > previousWatermark.getLowerTimeBound()) {
        CompletableFuture<List<Map<SegmentWithRange, Long>>> positionsFuture = Futures.allOfWithResults(activeWriters.stream().map(x -> {
            return Futures.keysAllOfWithResults(x.getValue().getPosition().entrySet().stream()
                    .collect(Collectors.toMap(y -> getSegmentWithRange(scope, streamName, context, y.getKey()), Entry::getValue)));
        }).collect(Collectors.toList()));
        log.debug(requestId, "Emitting watermark for stream {}/{} with time {}", scope, streamName, lowerBoundOnTime);
        return positionsFuture.thenAccept(listOfPositions -> listOfPositions.forEach(position -> {
            // Add writer positions to the upperBound map.
            addToUpperBound(position, upperBound);
        })).thenCompose(v -> computeStreamCut(scope, streamName, context, upperBound, previousWatermark)
                .thenApply(streamCut -> builder.lowerTimeBound(lowerBoundOnTime).upperTimeBound(upperBoundOnTime)
                        .streamCut(ImmutableMap.copyOf(streamCut)).build()));
    } else {
        // Time has not advanced; no watermark to be emitted.
        return CompletableFuture.completedFuture(null);
    }
}
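The two time bounds come straight from a single LongSummaryStatistics pass over the active writers' marks: the minimum reported time is the greatest lower bound (every active writer has progressed at least that far), and the maximum is the upper bound. A self-contained, JDK-only illustration (JDK 16+ for records; WriterMark here is a hypothetical stand-in for Pravega's class):

import java.util.List;
import java.util.LongSummaryStatistics;
import java.util.stream.Collectors;

public class TimeBounds {
    record WriterMark(String writer, long timestamp) { }

    public static void main(String[] args) {
        List<WriterMark> marks = List.of(
                new WriterMark("w1", 100L),
                new WriterMark("w2", 120L),
                new WriterMark("w3", 90L));
        LongSummaryStatistics stats = marks.stream()
                .collect(Collectors.summarizingLong(WriterMark::timestamp));
        long lowerBoundOnTime = stats.getMin(); // 90: every active writer has reached at least this time
        long upperBoundOnTime = stats.getMax(); // 120: the furthest any writer has reported
        System.out.println(lowerBoundOnTime + " .. " + upperBoundOnTime);
    }
}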
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class PeriodicRetention, the method retention:
public CompletableFuture<Void> retention(Stream stream) {
    // Track the new request for this automatic truncation.
    long requestId = requestIdGenerator.get();
    String requestDescriptor = RequestTracker.buildRequestDescriptor("truncateStream", stream.getScope(), stream.getStreamName());
    requestTracker.trackRequest(requestDescriptor, requestId);
    OperationContext context = streamMetadataStore.createStreamContext(stream.getScope(), stream.getStreamName(), requestId);
    log.debug(requestId, "Periodic background processing for retention called for stream {}/{}", stream.getScope(), stream.getStreamName());
    return RetryHelper.withRetriesAsync(() -> streamMetadataStore.getConfiguration(stream.getScope(), stream.getStreamName(), context, executor)
            .thenCompose(config -> streamMetadataTasks.retention(stream.getScope(), stream.getStreamName(), config.getRetentionPolicy(),
                    System.currentTimeMillis(), context, this.streamMetadataTasks.retrieveDelegationToken()))
            .exceptionally(e -> {
                log.warn(requestId, "Exception thrown while performing auto retention for stream {} ", stream, e);
                throw new CompletionException(e);
            }), RetryHelper.UNCONDITIONAL_PREDICATE, 5, executor)
            .exceptionally(e -> {
                log.warn(requestId, "Unable to perform retention for stream {}. Ignoring; retention will be attempted in the next cycle.", stream, e);
                return null;
            }).thenRun(() -> requestTracker.untrackRequest(requestDescriptor));
}
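RetryHelper.withRetriesAsync above re-runs the whole async pipeline up to five times under an unconditional predicate before giving up. A minimal, JDK-only sketch of that retry shape (an illustrative stand-in, not Pravega's RetryHelper, which also supports retry predicates and backoff):

import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

public class AsyncRetry {
    // Run the async task; on failure, re-invoke the supplier until attempts are exhausted.
    static <T> CompletableFuture<T> withRetries(Supplier<CompletableFuture<T>> task, int attempts) {
        return task.get().handle((result, err) -> {
            if (err == null) {
                return CompletableFuture.completedFuture(result);
            }
            if (attempts <= 1) {
                return CompletableFuture.<T>failedFuture(err);
            }
            // Retry the whole async task with one fewer attempt remaining.
            return withRetries(task, attempts - 1);
        }).thenCompose(f -> f);
    }
}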
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class ControllerService, the method getSegmentsAtHead:
public CompletableFuture<Map<SegmentId, Long>> getSegmentsAtHead(final String scope, final String stream, long requestId) {
    Exceptions.checkNotNullOrEmpty(scope, "scope");
    Exceptions.checkNotNullOrEmpty(stream, "stream");
    // Fetch the segments at the head of the stream, then map each segment record to its
    // SegmentId along with the corresponding head offset.
    OperationContext context = streamStore.createStreamContext(scope, stream, requestId);
    return streamStore.getSegmentsAtHead(scope, stream, context, executor).thenApply(segments ->
            segments.entrySet().stream().collect(Collectors.toMap(
                    entry -> ModelHelper.createSegmentId(scope, stream, entry.getKey().segmentId()),
                    Map.Entry::getValue)));
}
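The final thenApply is just a re-keying step: Collectors.toMap turns the store's segment-record-to-offset map into a SegmentId-to-offset map. A JDK-only sketch of that transformation, with hypothetical record types standing in for Pravega's classes:

import java.util.Map;
import java.util.stream.Collectors;

public class ReKeySketch {
    // Hypothetical stand-ins for Pravega's segment record and SegmentId types.
    record SegmentRecord(long segmentId) { }
    record SegmentId(String scope, String stream, long id) { }

    static Map<SegmentId, Long> toSegmentIds(String scope, String stream, Map<SegmentRecord, Long> heads) {
        return heads.entrySet().stream().collect(Collectors.toMap(
                entry -> new SegmentId(scope, stream, entry.getKey().segmentId()),
                Map.Entry::getValue));
    }
}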