use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.
the class ZKStream method createWriterMarkRecord.
@Override
CompletableFuture<Void> createWriterMarkRecord(String writer, long timestamp, ImmutableMap<Long, Long> position,
                                               OperationContext context) {
    String writerPath = getWriterPath(writer);
    WriterMark mark = new WriterMark(timestamp, position);
    // Serialize the mark and persist it as a new ZNode under the writer's path.
    return Futures.toVoid(store.createZNode(writerPath, mark.toBytes()));
}
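The record persisted above is essentially a (timestamp, position) pair, where position maps segment ids to offsets. Below is a minimal, hypothetical sketch of such a record with a naive serialization round trip; the class name and the Java-serialization wire format are illustrative only, not Pravega's actual record serializer.

import java.io.*;
import java.util.*;

// Hypothetical simplified analogue of WriterMark: a timestamp plus a
// segment-id-to-offset position map. Pravega uses its own binary serializer;
// Java serialization here is purely for illustration.
public final class SimpleWriterMark implements Serializable {
    private final long timestamp;
    private final HashMap<Long, Long> position; // segment id -> offset

    public SimpleWriterMark(long timestamp, Map<Long, Long> position) {
        this.timestamp = timestamp;
        this.position = new HashMap<>(position);
    }

    public byte[] toBytes() throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(this);
        }
        return bos.toByteArray();
    }

    public static SimpleWriterMark fromBytes(byte[] data) throws IOException, ClassNotFoundException {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(data))) {
            return (SimpleWriterMark) ois.readObject();
        }
    }
}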
use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.
the class PeriodicWatermarking method watermark.
/**
 * This method computes and emits a new watermark for the given stream.
 * It collects all the known writers for the given stream and includes only writers that are active (i.e. have
 * reported their marks recently). If all active writers have reported marks greater than the previously emitted
 * watermark, a new watermark is computed and emitted. If not, the window for considering writers as active is
 * progressed.
 * @param stream stream for which the watermark should be computed.
 * @return A CompletableFuture which, when completed, indicates that another iteration of periodic watermark
 * computation has finished.
 */
public CompletableFuture<Void> watermark(Stream stream) {
    String scope = stream.getScope();
    String streamName = stream.getStreamName();
    long requestId = requestIdGenerator.get();
    String requestDescriptor = RequestTracker.buildRequestDescriptor("watermark", stream.getScope(),
            stream.getStreamName());
    requestTracker.trackRequest(requestDescriptor, requestId);
    OperationContext context = streamMetadataStore.createStreamContext(scope, streamName, requestId);
    if (scope.equals(NameUtils.INTERNAL_SCOPE_NAME)) {
        return CompletableFuture.completedFuture(null);
    }
    log.debug(requestId, "Periodic background processing for watermarking called for stream {}/{}",
            scope, streamName);
    CompletableFuture<Map<String, WriterMark>> allWriterMarks = Futures.exceptionallyExpecting(
            streamMetadataStore.getAllWriterMarks(scope, streamName, context, executor),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException,
            Collections.emptyMap());
    return allWriterMarks.thenCompose(writers -> {
        WatermarkClient watermarkClient = watermarkClientCache.getUnchecked(stream);
        try {
            watermarkClient.reinitialize();
        } catch (Exception e) {
            log.warn(requestId, "Watermarking client for stream {} threw exception {} during reinitialize.",
                    stream, Exceptions.unwrap(e).getClass());
            if (Exceptions.unwrap(e) instanceof NoSuchSegmentException) {
                log.info(requestId, "Invalidating the watermark client in cache for stream {}.", stream);
                watermarkClientCache.invalidate(stream);
            }
            throw e;
        }
        return streamMetadataStore.getConfiguration(scope, streamName, context, executor)
                .thenCompose(config -> filterWritersAndComputeWatermark(scope, streamName, context,
                        watermarkClient, writers, config));
    }).exceptionally(e -> {
        log.warn(requestId, "Exception thrown while trying to perform periodic watermark computation. "
                + "Logging and ignoring.", e);
        return null;
    });
}
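The emit-or-progress decision described in the Javadoc can be condensed to a small predicate. The sketch below is hypothetical and simplified; the real filtering lives in filterWritersAndComputeWatermark and also handles the activity window and shutdown markers.

import java.util.Collection;

// Illustrative only: emit a new watermark when every active writer has reported
// a mark beyond the previously emitted lower time bound; otherwise the caller
// just progresses the activity window.
final class WatermarkDecisionSketch {
    static boolean shouldEmitNewWatermark(Collection<Long> activeWriterTimes, long previousLowerTimeBound) {
        return !activeWriterTimes.isEmpty()
                && activeWriterTimes.stream().allMatch(t -> t > previousLowerTimeBound);
    }
}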
use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.
the class PeriodicWatermarking method computeWatermark.
/**
 * This method takes the marks (time + position) of active writers and finds the greatest lower bound on time and
 * the least upper bound on positions, and returns a watermark object composed of the two.
 * The least upper bound computed from positions may not result in a consistent and complete stream cut.
 * So a positional upper bound is then converted into a stream cut by including segments from higher epochs.
 * It is also possible that, in an effort to fill a missing range, we end up creating an upper bound that is
 * composed of segments from the highest epoch. In the next iteration, from new writer positions, we may be able
 * to compute a tighter upper bound. But since the watermark has to advance both position and time, we take the
 * upper bound of the previous stream cut and the new stream cut.
 *
 * @param scope scope
 * @param streamName stream name
 * @param context operation context
 * @param activeWriters marks for all active writers.
 * @param previousWatermark previous watermark that was emitted.
 * @return CompletableFuture which, when completed, will contain the watermark to be emitted.
 */
private CompletableFuture<Watermark> computeWatermark(String scope, String streamName, OperationContext context,
                                                      List<Map.Entry<String, WriterMark>> activeWriters,
                                                      Watermark previousWatermark) {
    long requestId = context.getRequestId();
    Watermark.WatermarkBuilder builder = Watermark.builder();
    ConcurrentHashMap<SegmentWithRange, Long> upperBound = new ConcurrentHashMap<>();
    // We are deliberately making two passes over writers - first to find lowest time. Second loop will convert
    // writer positions to StreamSegmentRecord objects by retrieving ranges from store. And then perform
    // computation on those objects.
    LongSummaryStatistics summarized = activeWriters.stream()
            .collect(Collectors.summarizingLong(x -> x.getValue().getTimestamp()));
    long lowerBoundOnTime = summarized.getMin();
    long upperBoundOnTime = summarized.getMax();
    if (lowerBoundOnTime > previousWatermark.getLowerTimeBound()) {
        CompletableFuture<List<Map<SegmentWithRange, Long>>> positionsFuture = Futures.allOfWithResults(
                activeWriters.stream().map(x -> {
                    return Futures.keysAllOfWithResults(x.getValue().getPosition().entrySet().stream()
                            .collect(Collectors.toMap(
                                    y -> getSegmentWithRange(scope, streamName, context, y.getKey()),
                                    Entry::getValue)));
                }).collect(Collectors.toList()));
        log.debug(requestId, "Emitting watermark for stream {}/{} with time {}", scope, streamName,
                lowerBoundOnTime);
        return positionsFuture.thenAccept(listOfPositions -> listOfPositions.forEach(position -> {
            // add writer positions to upperBound map.
            addToUpperBound(position, upperBound);
        })).thenCompose(v -> computeStreamCut(scope, streamName, context, upperBound, previousWatermark)
                .thenApply(streamCut -> builder.lowerTimeBound(lowerBoundOnTime)
                        .upperTimeBound(upperBoundOnTime)
                        .streamCut(ImmutableMap.copyOf(streamCut))
                        .build()));
    } else {
        // new time is not advanced. No watermark to be emitted.
        return CompletableFuture.completedFuture(null);
    }
}
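To make the two bounds concrete, here is an illustrative sketch with segments reduced to plain ids. This is a simplification: the real code carries SegmentWithRange objects, and addToUpperBound additionally reconciles overlapping ranges across epochs before computeStreamCut patches the result into a consistent stream cut.

import java.util.*;
import java.util.stream.*;

// Illustrative only: min/max of writer times give the time bounds; the greatest
// reported offset per segment across all writers gives a naive positional upper bound.
final class BoundsSketch {
    static LongSummaryStatistics timeBounds(Collection<Long> writerTimes) {
        // getMin() is the lower time bound, getMax() the upper time bound.
        return writerTimes.stream().collect(Collectors.summarizingLong(Long::longValue));
    }

    static Map<Long, Long> naiveUpperBound(Collection<Map<Long, Long>> writerPositions) {
        Map<Long, Long> upperBound = new HashMap<>();
        for (Map<Long, Long> position : writerPositions) {
            // Keep the greatest reported offset per segment across all writers.
            position.forEach((segment, offset) -> upperBound.merge(segment, offset, Math::max));
        }
        return upperBound;
    }
}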
use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.
the class StreamMetadataStoreTest method testWriterMark.
@Test(timeout = 30000)
public void testWriterMark() {
    String stream = "mark";
    store.createScope(scope, null, executor).join();
    store.createStream(scope, stream, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), null, executor).join();
    // data not found
    String writer1 = "writer1";
    AssertExtensions.assertFutureThrows("", store.getWriterMark(scope, stream, writer1, null, executor),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    // now note a writer record
    store.noteWriterMark(scope, stream, writer1, 0L, Collections.singletonMap(0L, 1L), null, executor).join();
    store.getWriterMark(scope, stream, writer1, null, executor).join();
    // update the writer record
    store.noteWriterMark(scope, stream, writer1, 1L, Collections.singletonMap(0L, 2L), null, executor).join();
    WriterMark writerMark = store.getWriterMark(scope, stream, writer1, null, executor).join();
    assertTrue(writerMark.isAlive());
    Map<String, WriterMark> marks = store.getAllWriterMarks(scope, stream, null, executor).join();
    assertTrue(marks.containsKey(writer1));
    store.shutdownWriter(scope, stream, writer1, null, executor).join();
    writerMark = store.getWriterMark(scope, stream, writer1, null, executor).join();
    assertFalse(writerMark.isAlive());
    // note a mark after the writer has been shut down. It should become alive again.
    store.noteWriterMark(scope, stream, writer1, 2L, Collections.singletonMap(0L, 2L), null, executor).join();
    writerMark = store.getWriterMark(scope, stream, writer1, null, executor).join();
    assertTrue(writerMark.isAlive());
    // remove the writer
    AssertExtensions.assertFutureThrows("Mismatched writer mark did not throw exception",
            store.removeWriter(scope, stream, writer1, WriterMark.EMPTY, null, executor),
            e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    store.removeWriter(scope, stream, writer1, writerMark, null, executor).join();
    AssertExtensions.assertFutureThrows("", store.getWriterMark(scope, stream, writer1, null, executor),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    marks = store.getAllWriterMarks(scope, stream, null, executor).join();
    assertTrue(marks.isEmpty());
    String writer2 = "writer2";
    store.noteWriterMark(scope, stream, writer2, 1L, Collections.singletonMap(0L, 1L), null, executor).join();
    String writer3 = "writer3";
    store.noteWriterMark(scope, stream, writer3, 1L, Collections.singletonMap(0L, 1L), null, executor).join();
    String writer4 = "writer4";
    store.noteWriterMark(scope, stream, writer4, 1L, Collections.singletonMap(0L, 1L), null, executor).join();
    marks = store.getAllWriterMarks(scope, stream, null, executor).join();
    assertFalse(marks.containsKey(writer1));
    assertTrue(marks.containsKey(writer2));
    assertTrue(marks.containsKey(writer3));
    assertTrue(marks.containsKey(writer4));
}
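The conditional removal exercised above (removing with a stale mark fails with WriteConflictException) is an optimistic-concurrency guard. A minimal, hypothetical sketch of that semantic, assuming an in-memory table keyed by writer id; class and method names are illustrative, not Pravega's API:

import java.util.concurrent.ConcurrentHashMap;

// Illustrative only: removal succeeds solely when the caller supplies the
// currently stored value; a mismatch is surfaced as a write conflict.
final class WriterMarkTable {
    private final ConcurrentHashMap<String, Long> markTimestamps = new ConcurrentHashMap<>();

    void note(String writer, long timestamp) {
        markTimestamps.put(writer, timestamp);
    }

    void remove(String writer, long expectedTimestamp) {
        // ConcurrentHashMap#remove(key, value) is an atomic compare-and-delete.
        if (!markTimestamps.remove(writer, expectedTimestamp)) {
            throw new IllegalStateException("write conflict: stale mark for writer " + writer);
        }
    }
}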
use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.
the class StreamTestBase method testTransactionMarkFromSingleWriter.
@Test(timeout = 30000L)
public void testTransactionMarkFromSingleWriter() {
    PersistentStreamBase streamObj = spy(createStream("txnMark", "txnMark", System.currentTimeMillis(), 1, 0));
    String writer = "writer";
    UUID txnId1 = new UUID(0L, 0L);
    UUID txnId2 = new UUID(0L, 1L);
    UUID txnId3 = new UUID(0L, 2L);
    UUID txnId4 = new UUID(0L, 3L);
    long time = 1L;
    // create 4 transactions with same writer id.
    // two of the transactions should have same highest time.
    OperationContext context = getContext();
    VersionedTransactionData tx01 = streamObj.createTransaction(txnId1, 100, 100, context).join();
    streamObj.sealTransaction(txnId1, true, Optional.of(tx01.getVersion()), writer, time, context).join();
    VersionedTransactionData tx02 = streamObj.createTransaction(txnId2, 100, 100, context).join();
    streamObj.sealTransaction(txnId2, true, Optional.of(tx02.getVersion()), writer, time + 1L, context).join();
    VersionedTransactionData tx03 = streamObj.createTransaction(txnId3, 100, 100, context).join();
    streamObj.sealTransaction(txnId3, true, Optional.of(tx03.getVersion()), writer, time + 4L, context).join();
    VersionedTransactionData tx04 = streamObj.createTransaction(txnId4, 100, 100, context).join();
    streamObj.sealTransaction(txnId4, true, Optional.of(tx04.getVersion()), writer, time + 4L, context).join();
    streamObj.startCommittingTransactions(100, context).join();
    TxnWriterMark writerMarks = new TxnWriterMark(time + 4L, Collections.singletonMap(0L, 1L), txnId4);
    Map<String, TxnWriterMark> marksForWriters = Collections.singletonMap(writer, writerMarks);
    streamObj.generateMarksForTransactions(context, marksForWriters).join();
    // verify that writer mark is created in the store
    WriterMark mark = streamObj.getWriterMark(writer, context).join();
    assertEquals(mark.getTimestamp(), time + 4L);
    // verify that only one call to note time is made
    verify(streamObj, times(1)).noteWriterMark(anyString(), anyLong(), any(), any());
}
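The single noteWriterMark call verified above reflects how marks are generated for transactional writers: of a writer's committed transactions, only the one with the greatest commit time contributes a mark. An illustrative reduction of that idea, with names hypothetical and segments omitted:

import java.util.*;
import java.util.stream.*;

// Illustrative only: collapse many (writer, commitTime) pairs down to one
// mark per writer by keeping the greatest commit time.
final class TxnMarkSketch {
    static Map<String, Long> latestMarkPerWriter(List<Map.Entry<String, Long>> txnCommitTimes) {
        return txnCommitTimes.stream()
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, Math::max));
    }
}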