
Example 6 with WriterMark

use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.

the class WatermarkWorkflowTest method testWatermarkClient.

@Test(timeout = 10000L)
public void testWatermarkClient() {
    Stream stream = new StreamImpl("scope", "stream");
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    @Cleanup MockRevisionedStreamClient revisionedClient = new MockRevisionedStreamClient();
    doAnswer(x -> revisionedClient).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    @Cleanup PeriodicWatermarking.WatermarkClient client = new PeriodicWatermarking.WatermarkClient(stream, clientFactory);
    // iteration 1 ==> null -> w1
    client.reinitialize();
    // There is no watermark in the stream. The previous watermark should be empty, and all writers should be active and participating.
    assertEquals(revisionedClient.getMark(), MockRevision.EMPTY);
    assertTrue(revisionedClient.watermarks.isEmpty());
    assertEquals(client.getPreviousWatermark(), Watermark.EMPTY);
    Map.Entry<String, WriterMark> entry0 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(0L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry1 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(1L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry2 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(2L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry3 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(3L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry4 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(4L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry5 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(5L, ImmutableMap.of()));
    assertTrue(client.isWriterActive(entry0, 0L));
    assertTrue(client.isWriterParticipating(0L));
    Watermark first = new Watermark(1L, 2L, ImmutableMap.of());
    client.completeIteration(first);
    // iteration 2 : do not emit ==> w1 -> w1
    client.reinitialize();
    // There is one watermark. All writers should be active, and writers with times greater than the last watermark should be participating.
    assertEquals(revisionedClient.getMark(), MockRevision.EMPTY);
    assertEquals(revisionedClient.watermarks.size(), 1);
    assertEquals(client.getPreviousWatermark(), first);
    assertTrue(client.isWriterActive(entry2, 0L));
    assertFalse(client.isWriterActive(entry1, 0L));
    assertTrue(client.isWriterTracked(entry1.getKey()));
    assertFalse(client.isWriterParticipating(1L));
    assertTrue(client.isWriterParticipating(2L));
    // don't emit a watermark. Everything stays the same as before.
    client.completeIteration(null);
    // iteration 3 : emit ==> w1 -> w1 w2
    client.reinitialize();
    // There is one watermark. All writers should be active, and writers with times greater than the last watermark should be participating.
    assertEquals(revisionedClient.getMark(), MockRevision.EMPTY);
    assertEquals(revisionedClient.watermarks.size(), 1);
    assertEquals(client.getPreviousWatermark(), first);
    assertTrue(client.isWriterActive(entry2, 0L));
    assertFalse(client.isWriterParticipating(1L));
    assertTrue(client.isWriterParticipating(2L));
    // emit second watermark
    Watermark second = new Watermark(2L, 3L, ImmutableMap.of());
    client.completeIteration(second);
    // iteration 4: do not emit ==> w1 w2 -> w1 w2
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(0).getKey());
    assertEquals(2, revisionedClient.watermarks.size());
    assertEquals(client.getPreviousWatermark(), second);
    assertFalse(client.isWriterActive(entry2, 0L));
    assertTrue(client.isWriterTracked(entry2.getKey()));
    assertTrue(client.isWriterActive(entry3, 0L));
    assertFalse(client.isWriterParticipating(2L));
    assertTrue(client.isWriterParticipating(3L));
    assertTrue(client.isWriterActive(entry0, 1000L));
    assertTrue(client.isWriterTracked(entry0.getKey()));
    // don't emit a watermark, but complete this iteration.
    client.completeIteration(null);
    // iteration 6: emit ==> w1 w2 -> w1 w2 w3
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(0).getKey());
    assertEquals(2, revisionedClient.watermarks.size());
    assertEquals(client.getPreviousWatermark(), second);
    assertTrue(client.isWriterActive(entry3, 0L));
    assertFalse(client.isWriterTracked(entry3.getKey()));
    assertFalse(client.isWriterParticipating(2L));
    assertTrue(client.isWriterParticipating(3L));
    // emit third watermark
    Watermark third = new Watermark(3L, 4L, ImmutableMap.of());
    client.completeIteration(third);
    // iteration 7: do not emit ==> w1 w2 w3 -> w1 w2 w3
    client.reinitialize();
    // active writers should be ahead of first watermark. participating writers should be ahead of second watermark
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(1).getKey());
    assertEquals(3, revisionedClient.watermarks.size());
    assertEquals(client.getPreviousWatermark(), third);
    assertFalse(client.isWriterActive(entry3, 0L));
    assertTrue(client.isWriterActive(entry4, 0L));
    assertFalse(client.isWriterParticipating(3L));
    assertTrue(client.isWriterParticipating(4L));
    client.completeIteration(null);
    // iteration 8 : emit ==> w2 w3 -> w2 w3 w4
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(1).getKey());
    // window = w2 w3
    assertEquals(revisionedClient.watermarks.size(), 3);
    assertEquals(client.getPreviousWatermark(), third);
    assertFalse(client.isWriterActive(entry3, 0L));
    assertTrue(client.isWriterActive(entry4, 0L));
    assertFalse(client.isWriterParticipating(3L));
    assertTrue(client.isWriterParticipating(4L));
    // emit fourth watermark
    Watermark fourth = new Watermark(4L, 5L, ImmutableMap.of());
    client.completeIteration(fourth);
    // iteration 9: do not emit ==> w1 w2 w3 w4 -> w1 w2 w3 w4; check writer timeout
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(2).getKey());
    assertEquals(revisionedClient.watermarks.size(), 4);
    assertEquals(client.getPreviousWatermark(), fourth);
    assertFalse(client.isWriterActive(entry3, 0L));
    assertTrue(client.isWriterTracked(entry4.getKey()));
    assertFalse(client.isWriterParticipating(4L));
    assertTrue(client.isWriterParticipating(5L));
    // verify that writer is active if we specify a higher timeout
    assertTrue(client.isWriterActive(entry1, 1000L));
    assertTrue(client.isWriterTracked(entry1.getKey()));
    // now that the writer is being tracked, a delayed check with a short timeout should report it inactive
    assertFalse(Futures.delayedTask(() -> client.isWriterActive(entry1, 1L), Duration.ofSeconds(1), executor).join());
    assertTrue(client.isWriterTracked(entry1.getKey()));
    // don't emit a watermark, but complete this iteration. This should shrink the window again.
    client.completeIteration(null);
    // iteration 10
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(2).getKey());
    assertEquals(revisionedClient.watermarks.size(), 4);
    assertEquals(client.getPreviousWatermark(), fourth);
    assertFalse(client.isWriterActive(entry4, 0L));
    assertTrue(client.isWriterActive(entry5, 0L));
    assertFalse(client.isWriterParticipating(4L));
    assertTrue(client.isWriterParticipating(5L));
}
Also used : WriterMark(io.pravega.controller.store.stream.records.WriterMark) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Cleanup(lombok.Cleanup) SynchronizerClientFactory(io.pravega.client.SynchronizerClientFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) Stream(io.pravega.client.stream.Stream) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) AbstractMap(java.util.AbstractMap) Watermark(io.pravega.shared.watermarks.Watermark) Test(org.junit.Test)
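
The test above relies on MockRevisionedStreamClient and MockRevision, which are not shown on this page. Below is a minimal, hypothetical sketch of what such an in-memory test double could look like, inferred only from the calls the test makes (getMark(), a watermarks list of revision/watermark entries, and an EMPTY revision constant). It is an illustration, not Pravega's actual helper, and it omits the RevisionedStreamClient interface methods the real mock would implement.

import io.pravega.shared.watermarks.Watermark;

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Hypothetical stand-ins for the test doubles referenced above.
class MockRevision {
    static final MockRevision EMPTY = new MockRevision(-1);

    private final int value;

    MockRevision(int value) {
        this.value = value;
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof MockRevision && ((MockRevision) o).value == value;
    }

    @Override
    public int hashCode() {
        return Integer.hashCode(value);
    }
}

class MockRevisionedStreamClient {
    // Ordered log of emitted watermarks, keyed by the revision at which each was written.
    final List<Map.Entry<MockRevision, Watermark>> watermarks = new ArrayList<>();

    // Truncation mark; EMPTY until the watermarking client advances it.
    private MockRevision mark = MockRevision.EMPTY;

    MockRevision getMark() {
        return mark;
    }

    void setMark(MockRevision mark) {
        this.mark = mark;
    }

    void append(Watermark watermark) {
        watermarks.add(new AbstractMap.SimpleEntry<>(new MockRevision(watermarks.size()), watermark));
    }
}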

Example 7 with WriterMark

use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.

the class WatermarkWorkflowTest method testWatermarkingWorkflow.

@Test(timeout = 30000L)
public void testWatermarkingWorkflow() {
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    ConcurrentHashMap<String, MockRevisionedStreamClient> revisionedStreamClientMap = new ConcurrentHashMap<>();
    doAnswer(x -> {
        String streamName = x.getArgument(0);
        return revisionedStreamClientMap.compute(streamName, (s, rsc) -> {
            if (rsc != null) {
                return rsc;
            } else {
                return new MockRevisionedStreamClient();
            }
        });
    }).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    @Cleanup PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStore, bucketStore, sp -> clientFactory, executor, new RequestTracker(false));
    String streamName = "stream";
    String scope = "scope";
    streamMetadataStore.createScope(scope, null, executor).join();
    streamMetadataStore.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).timestampAggregationTimeout(10000L).build(), System.currentTimeMillis(), null, executor).join();
    streamMetadataStore.setState(scope, streamName, State.ACTIVE, null, executor).join();
    // set minimum number of segments to 1
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).timestampAggregationTimeout(10000L).build();
    streamMetadataStore.startUpdateConfiguration(scope, streamName, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamMetadataStore.getConfigurationRecord(scope, streamName, null, executor).join();
    streamMetadataStore.completeUpdateConfiguration(scope, streamName, configRecord, null, executor).join();
    // 2. note writer1, writer2, writer3 marks
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2.
    // writer 3 reports segments 0, 2.
    String writer1 = "writer1";
    Map<Long, Long> map1 = ImmutableMap.of(0L, 100L, 1L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 100L, map1, null, executor).join();
    String writer2 = "writer2";
    Map<Long, Long> map2 = ImmutableMap.of(1L, 100L, 2L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 101L, map2, null, executor).join();
    String writer3 = "writer3";
    Map<Long, Long> map3 = ImmutableMap.of(2L, 100L, 0L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 102L, map3, null, executor).join();
    // 3. run watermarking workflow.
    StreamImpl stream = new StreamImpl(scope, streamName);
    periodicWatermarking.watermark(stream).join();
    // verify that a watermark has been emitted.
    // this should emit a watermark that contains all three segments with offsets = 200L
    // and timestamp = 100L
    MockRevisionedStreamClient revisionedClient = revisionedStreamClientMap.get(NameUtils.getMarkStreamForStream(streamName));
    assertEquals(revisionedClient.watermarks.size(), 1);
    Watermark watermark = revisionedClient.watermarks.get(0).getValue();
    assertEquals(watermark.getLowerTimeBound(), 100L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 200L);
    // send positions only on segment 1 and segment 2. nothing on segment 0.
    map1 = ImmutableMap.of(1L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 200L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 201L, map2, null, executor).join();
    map3 = ImmutableMap.of(2L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 202L, map3, null, executor).join();
    // run watermark workflow. this will emit a watermark with time = 200L and streamcut = 0 -> 200L, 1 -> 300L, 2 -> 300L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 2);
    watermark = revisionedClient.watermarks.get(1).getValue();
    assertEquals(watermark.getLowerTimeBound(), 200L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 300L);
    assertEquals(getSegmentOffset(watermark, 2L), 300L);
    // scale stream 0, 1, 2 -> 3, 4
    scaleStream(streamName, scope);
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2
    // writer 3 reports segment 3
    map1 = ImmutableMap.of(0L, 300L, 1L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 302L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 301L, map2, null, executor).join();
    long segment3 = NameUtils.computeSegmentId(3, 1);
    long segment4 = NameUtils.computeSegmentId(4, 1);
    map3 = ImmutableMap.of(segment3, 100L);
    // writer 3 has lowest reported time.
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 300L, map3, null, executor).join();
    // run watermark workflow. this will emit a watermark with time = 300L and streamcut = 3 -> 100L, 4 -> 0L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    watermark = revisionedClient.watermarks.get(2).getValue();
    assertEquals(watermark.getLowerTimeBound(), 300L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 0L);
    // report positions from writers on the pre-scale segments.
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2.
    // writer 3 doesn't report.
    map1 = ImmutableMap.of(0L, 400L, 1L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 400L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 401L, map2, null, executor).join();
    // run watermark workflow. there shouldn't be a watermark emitted because writer 3 is active and has not reported a time.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    // even though writer3 is excluded from computation, its mark is still not removed because it is still active
    WriterMark writer3Mark = streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor).join();
    assertTrue(writer3Mark.isAlive());
    assertEquals(writer3Mark.getTimestamp(), 300L);
    // report shutdown of writer 3
    streamMetadataStore.shutdownWriter(scope, streamName, writer3, null, executor).join();
    writer3Mark = streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor).join();
    assertFalse(writer3Mark.isAlive());
    assertEquals(writer3Mark.getTimestamp(), 300L);
    // now a watermark should be generated. Time should be advanced. But watermark's stream cut is already ahead of writer's
    // positions so stream cut should not advance.
    // Also writer 3 being inactive and shutdown, should be removed.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 4);
    watermark = revisionedClient.watermarks.get(3).getValue();
    assertEquals(watermark.getLowerTimeBound(), 400L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 0L);
    AssertExtensions.assertFutureThrows("Writer 3 should have been removed from store", streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    // writer 1, 2 and 3 report marks. With writer 3 reporting mark on segment 4. Writer3 will get added again
    map1 = ImmutableMap.of(0L, 500L, 1L, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 500L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 501L, map2, null, executor).join();
    map3 = ImmutableMap.of(segment4, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 502L, map3, null, executor).join();
    // run watermarking workflow. It should generate watermark that includes segments 3 -> 100L and 4 -> 500L with time 500L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 5);
    watermark = revisionedClient.watermarks.get(4).getValue();
    assertEquals(watermark.getLowerTimeBound(), 500L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 500L);
}
Also used : WriterMark(io.pravega.controller.store.stream.records.WriterMark) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RequestTracker(io.pravega.common.tracing.RequestTracker) Cleanup(lombok.Cleanup) StoreException(io.pravega.controller.store.stream.StoreException) SynchronizerClientFactory(io.pravega.client.SynchronizerClientFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Watermark(io.pravega.shared.watermarks.Watermark) Test(org.junit.Test)
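
The getSegmentOffset helper used in this test is not shown on this page. A plausible sketch follows, assuming Watermark.getStreamCut() maps SegmentWithRange entries (exposing getSegmentId()) to offsets; the class name and error handling are illustrative.

import io.pravega.shared.watermarks.SegmentWithRange;
import io.pravega.shared.watermarks.Watermark;

import java.util.Map;

class WatermarkTestSupport {
    // Looks up the offset recorded in the watermark's stream cut for the given segment id.
    static long getSegmentOffset(Watermark watermark, long segmentId) {
        return watermark.getStreamCut().entrySet().stream()
                .filter(e -> e.getKey().getSegmentId() == segmentId)
                .map(Map.Entry::getValue)
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException("segment not in stream cut: " + segmentId));
    }
}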

Example 8 with WriterMark

use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.

the class StreamMetadataStoreTest method testMarkOnTransactionCommit.

@Test(timeout = 30000)
public void testMarkOnTransactionCommit() {
    // create txn
    // seal txn with committing
    final String scope = "MarkOnTransactionCommit";
    final String stream = "MarkOnTransactionCommit";
    final ScalingPolicy policy = ScalingPolicy.fixed(1);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).join();
    store.createStream(scope, stream, configuration, start, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    UUID txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx01 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).join();
    String writer1 = "writer1";
    long time = 1L;
    store.sealTransaction(scope, stream, txnId, true, Optional.of(tx01.getVersion()), writer1, time, null, executor).join();
    VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.singletonMap(writer1, new TxnWriterMark(time, Collections.singletonMap(0L, 1L), txnId))).join();
    // verify that writer mark is created in the store
    WriterMark mark = store.getWriterMark(scope, stream, writer1, null, executor).join();
    assertEquals(mark.getTimestamp(), time);
    assertEquals(mark.getPosition().size(), 1);
    assertTrue(mark.getPosition().containsKey(0L));
    assertEquals(mark.getPosition().get(0L).longValue(), 1L);
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) WriterMark(io.pravega.controller.store.stream.records.WriterMark) CommittingTransactionsRecord(io.pravega.controller.store.stream.records.CommittingTransactionsRecord) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) UUID(java.util.UUID) Test(org.junit.Test)
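
For orientation, the WriterMark exercised by these tests is essentially a (timestamp, position, isAlive) value object. The sketch below approximates its shape using only the constructors and accessors that appear on this page; the real io.pravega.controller.store.stream.records.WriterMark also carries serialization (behind toBytes()), which is omitted here.

import com.google.common.collect.ImmutableMap;

// Simplified approximation of WriterMark, for illustration only.
class WriterMarkSketch {
    private final long timestamp;                     // writer-reported time
    private final ImmutableMap<Long, Long> position;  // segment id -> offset
    private final boolean alive;                      // cleared when the writer is shut down

    WriterMarkSketch(long timestamp, ImmutableMap<Long, Long> position) {
        this(timestamp, position, true);
    }

    WriterMarkSketch(long timestamp, ImmutableMap<Long, Long> position, boolean alive) {
        this.timestamp = timestamp;
        this.position = position;
        this.alive = alive;
    }

    long getTimestamp() {
        return timestamp;
    }

    ImmutableMap<Long, Long> getPosition() {
        return position;
    }

    boolean isAlive() {
        return alive;
    }
}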

Example 9 with WriterMark

use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.

the class StreamTestBase method testWriterMark.

@Test(timeout = 30000L)
public void testWriterMark() {
    OperationContext context = getContext();
    PersistentStreamBase stream = spy(createStream("writerMark", "writerMark", System.currentTimeMillis(), 3, 0));
    Map<String, WriterMark> marks = stream.getAllWriterMarks(context).join();
    assertTrue(marks.isEmpty());
    // call noteWriterMark --> this should call createWriterMarkRecord
    String writer = "writer";
    long timestamp = 0L;
    Map<Long, Long> position = Collections.singletonMap(0L, 1L);
    ImmutableMap<Long, Long> immutablePos = ImmutableMap.copyOf(position);
    stream.noteWriterMark(writer, timestamp, position, context).join();
    marks = stream.getAllWriterMarks(context).join();
    assertEquals(marks.size(), 1);
    verify(stream, times(1)).createWriterMarkRecord(writer, timestamp, immutablePos, context);
    VersionedMetadata<WriterMark> mark = stream.getWriterMarkRecord(writer, context).join();
    Version version = mark.getVersion();
    // call noteWriterMark --> this should call updateWriterMarkRecord
    stream.noteWriterMark(writer, timestamp, position, context).join();
    marks = stream.getAllWriterMarks(context).join();
    assertEquals(marks.size(), 1);
    mark = stream.getWriterMarkRecord(writer, context).join();
    assertNotEquals(mark.getVersion(), version);
    verify(stream, times(1)).updateWriterMarkRecord(anyString(), anyLong(), any(), anyBoolean(), any(), any());
    AssertExtensions.assertFutureThrows("", stream.createWriterMarkRecord(writer, timestamp, immutablePos, context), e -> Exceptions.unwrap(e) instanceof StoreException.DataExistsException);
    // update
    mark = stream.getWriterMarkRecord(writer, context).join();
    stream.updateWriterMarkRecord(writer, timestamp, immutablePos, true, mark.getVersion(), context).join();
    // verify bad version on update
    AssertExtensions.assertFutureThrows("", stream.updateWriterMarkRecord(writer, timestamp, immutablePos, true, mark.getVersion(), context), e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    mark = stream.getWriterMarkRecord(writer, context).join();
    // update deleted writer --> data not found
    stream.removeWriter(writer, mark.getObject(), context).join();
    marks = stream.getAllWriterMarks(context).join();
    assertEquals(marks.size(), 0);
    AssertExtensions.assertFutureThrows("", stream.updateWriterMarkRecord(writer, timestamp, immutablePos, true, mark.getVersion(), context), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    // create writer record
    stream.createWriterMarkRecord(writer, timestamp, immutablePos, context).join();
    // Mock getWriterMarkRecord to throw DataNotFound on the first call. This causes noteWriterMark to attempt a create,
    // which fails with DataExists, resulting in a recursive call into noteWriterMark that does a get and update.
    AtomicBoolean callRealMethod = new AtomicBoolean(false);
    doAnswer(x -> {
        if (callRealMethod.compareAndSet(false, true)) {
            return Futures.failedFuture(StoreException.create(StoreException.Type.DATA_NOT_FOUND, "writer mark"));
        } else {
            return x.callRealMethod();
        }
    }).when(stream).getWriterMarkRecord(writer, context);
    timestamp = 1L;
    position = Collections.singletonMap(0L, 2L);
    AssertExtensions.assertFutureThrows("Expecting WriteConflict", stream.noteWriterMark(writer, timestamp, position, context), e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
}
Also used : TestOperationContext(io.pravega.controller.store.TestOperationContext) WriterMark(io.pravega.controller.store.stream.records.WriterMark) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Version(io.pravega.controller.store.Version) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) AtomicLong(java.util.concurrent.atomic.AtomicLong) Test(org.junit.Test)
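
The mocked getWriterMarkRecord at the end of this test exercises the fallback path inside noteWriterMark: read the record, fall back to create on DataNotFound, and fall back again to get-and-update on DataExists. A rough, synchronous outline of that control flow is sketched below against hypothetical method and exception names mirroring the ones verified above; the real PersistentStreamBase implementation is asynchronous and versioned.

import com.google.common.collect.ImmutableMap;

import java.util.Map;

// Hypothetical outline of the get-or-create-then-update flow described in the comments above.
abstract class NoteWriterMarkSketch {

    static class DataNotFoundException extends RuntimeException { }

    static class DataExistsException extends RuntimeException { }

    static class VersionedMark {
        long version;
    }

    abstract VersionedMark getWriterMarkRecord(String writer);

    abstract void createWriterMarkRecord(String writer, long timestamp, ImmutableMap<Long, Long> position);

    abstract void updateWriterMarkRecord(String writer, long timestamp, ImmutableMap<Long, Long> position,
                                         boolean isAlive, long expectedVersion);

    void noteWriterMark(String writer, long timestamp, Map<Long, Long> position) {
        ImmutableMap<Long, Long> pos = ImmutableMap.copyOf(position);
        try {
            // Normal path: read the current record and conditionally update against its version.
            VersionedMark existing = getWriterMarkRecord(writer);
            updateWriterMarkRecord(writer, timestamp, pos, true, existing.version);
        } catch (DataNotFoundException notFound) {
            try {
                // No record yet: create one.
                createWriterMarkRecord(writer, timestamp, pos);
            } catch (DataExistsException concurrentCreate) {
                // Another update raced the create: retry the get-and-update path.
                noteWriterMark(writer, timestamp, position);
            }
        }
    }
}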

Example 10 with WriterMark

use of io.pravega.controller.store.stream.records.WriterMark in project pravega by pravega.

the class ZKStream method updateWriterMarkRecord.

@Override
CompletableFuture<Void> updateWriterMarkRecord(String writer, long timestamp, ImmutableMap<Long, Long> position, boolean isAlive, Version version, OperationContext context) {
    String writerPath = getWriterPath(writer);
    WriterMark mark = new WriterMark(timestamp, position, isAlive);
    return Futures.toVoid(store.setData(writerPath, mark.toBytes(), version));
}
Also used : WriterMark(io.pravega.controller.store.stream.records.WriterMark)
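
The version parameter makes the underlying setData a conditional write: callers pass the version obtained from a prior read, and a stale version surfaces as StoreException.WriteConflictException, as exercised in the StreamTestBase example above. A minimal usage sketch mirroring that test follows; identifiers such as stream, writer, timestamp, immutablePos, and context are assumed to be in scope.

// Read the current record to obtain its version, then update conditionally against it.
VersionedMetadata<WriterMark> current = stream.getWriterMarkRecord(writer, context).join();
stream.updateWriterMarkRecord(writer, timestamp, immutablePos, true, current.getVersion(), context).join();
// Reusing the now-stale version for a second update fails with StoreException.WriteConflictException.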

Aggregations

WriterMark (io.pravega.controller.store.stream.records.WriterMark) 16
Test (org.junit.Test) 9
StreamConfiguration (io.pravega.client.stream.StreamConfiguration) 7
ImmutableMap (com.google.common.collect.ImmutableMap) 6
HashMap (java.util.HashMap) 6
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 5
SynchronizerClientFactory (io.pravega.client.SynchronizerClientFactory) 5
Exceptions (io.pravega.common.Exceptions) 5
Futures (io.pravega.common.concurrent.Futures) 5
TagLogger (io.pravega.common.tracing.TagLogger) 5
EpochRecord (io.pravega.controller.store.stream.records.EpochRecord) 5
Watermark (io.pravega.shared.watermarks.Watermark) 5
ArrayList (java.util.ArrayList) 5
Collections (java.util.Collections) 5
List (java.util.List) 5
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 5
Stream (io.pravega.client.stream.Stream) 4
RequestTracker (io.pravega.common.tracing.RequestTracker) 4
Map (java.util.Map) 4
UUID (java.util.UUID) 4