Search in sources :

Example 16 with BucketStore

use of io.pravega.controller.store.stream.BucketStore in project pravega by pravega.

The following example shows the setup method of the RetentionServiceHealthContributorTest class.

@Before
public void setup() {
    // Mock a ZK-backed bucket store so the factory produces ZooKeeper bucket managers.
    BucketStore bucketStore = mock(ZookeeperBucketStore.class);
    doReturn(StoreType.Zookeeper).when(bucketStore).getStoreType();
    String hostId = UUID.randomUUID().toString();
    BucketServiceFactory bucketStoreFactory = spy(new BucketServiceFactory(hostId, bucketStore, 2));
    ScheduledExecutorService executor = mock(ScheduledExecutorService.class);
    PeriodicRetention periodicRetention = mock(PeriodicRetention.class);
    // FIX: this fixture backs RetentionServiceHealthContributor tests, so build the
    // retention service; the original called createWatermarkingService while passing
    // retention work (periodicRetention::retention), which tested the wrong service.
    retentionService = spy(bucketStoreFactory.createRetentionService(Duration.ofMillis(5), periodicRetention::retention, executor));
    // Stub out the ZooKeeper interactions so the service can "start" without a real ensemble.
    doReturn(CompletableFuture.completedFuture(null)).when((ZooKeeperBucketManager) retentionService).initializeService();
    doNothing().when((ZooKeeperBucketManager) retentionService).startBucketOwnershipListener();
    doReturn(true).when(retentionService).isHealthy();
    contributor = new RetentionServiceHealthContributor("retentionservice", retentionService);
    builder = Health.builder().name("retentionservice");
}
Also used : ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) BucketServiceFactory(io.pravega.controller.server.bucket.BucketServiceFactory) PeriodicRetention(io.pravega.controller.server.bucket.PeriodicRetention) BucketStore(io.pravega.controller.store.stream.BucketStore) ZookeeperBucketStore(io.pravega.controller.store.stream.ZookeeperBucketStore) ZooKeeperBucketManager(io.pravega.controller.server.bucket.ZooKeeperBucketManager) Before(org.junit.Before)

Example 17 with BucketStore

use of io.pravega.controller.store.stream.BucketStore in project pravega by pravega.

The following example shows the getControllerService method of the InMemoryControllerServiceImplTest class.

@Override
public ControllerService getControllerService() {
    // Build a fully in-memory controller stack: stores, task frameworks, and request handlers.
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
    taskMetadataStore = TaskStoreFactoryForTests.createInMemoryStore(executorService);
    streamStore = StreamStoreFactory.createInMemoryStore();
    BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    segmentHelper = SegmentHelperMock.getSegmentHelperMockForTables(executorService);
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executorService, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executorService, "host", GrpcAuthHelper.getDisabledAuthHelper());
    this.kvtStore = KVTableStoreFactory.createInMemoryStore(streamStore, executorService);
    EventHelper tableEventHelper = EventHelperMock.getEventHelperMock(executorService, "host", ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
    this.kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executorService, executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), tableEventHelper);
    this.tableRequestHandler = new TableRequestHandler(new CreateTableTask(this.kvtStore, this.kvtMetadataTasks, executorService), new DeleteTableTask(this.kvtStore, this.kvtMetadataTasks, executorService), this.kvtStore, executorService);
    this.streamRequestHandler = new StreamRequestHandler(new AutoScaleTask(streamMetadataTasks, streamStore, executorService), new ScaleOperationTask(streamMetadataTasks, streamStore, executorService), new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService), new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executorService), new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService), new TruncateStreamTask(streamMetadataTasks, streamStore, executorService), new CreateReaderGroupTask(streamMetadataTasks, streamStore, executorService), new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executorService), new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executorService), streamStore, new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executorService), executorService);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    tableEventHelper.setRequestEventWriter(new ControllerEventTableWriterMock(tableRequestHandler, executorService));
    Cluster mockCluster = mock(Cluster.class);
    when(mockCluster.getClusterMembers()).thenReturn(Collections.singleton(new Host("localhost", 9090, null)));
    // FIX: reuse the bucket store wired into the tasks and handlers above. The original
    // created a second, independent in-memory bucket store here, so the ControllerService
    // and the metadata tasks operated on different (disconnected) bucket state.
    return new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, SegmentHelperMock.getSegmentHelperMock(), executorService, mockCluster, requestTracker);
}
Also used : CreateTableTask(io.pravega.controller.server.eventProcessor.requesthandlers.kvtable.CreateTableTask) DeleteScopeTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask) DeleteTableTask(io.pravega.controller.server.eventProcessor.requesthandlers.kvtable.DeleteTableTask) CreateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask) ControllerService(io.pravega.controller.server.ControllerService) AutoScaleTask(io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) BucketStore(io.pravega.controller.store.stream.BucketStore) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) SealStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask) UpdateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask) DeleteReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask) Cluster(io.pravega.common.cluster.Cluster) Host(io.pravega.common.cluster.Host) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) ControllerEventTableWriterMock(io.pravega.controller.mocks.ControllerEventTableWriterMock) TableRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.kvtable.TableRequestHandler) DeleteStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask) UpdateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateReaderGroupTask) EventHelper(io.pravega.controller.task.EventHelper) 
TruncateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask)

Example 18 with BucketStore

use of io.pravega.controller.store.stream.BucketStore in project pravega by pravega.

The following example shows the testMaxConcurrentJobs method of the ZkStoreBucketServiceTest class.

@Test(timeout = 30000)
public void testMaxConcurrentJobs() {
    // Verifies that a bucket service with maxConcurrentExecutions = 2 runs at most two
    // stream jobs at a time and queues the rest until a running job completes.
    String scope = "scope";
    String stream1 = "stream1";
    String stream2 = "stream2";
    String stream3 = "stream3";
    String stream4 = "stream4";
    // One latch per stream to observe that its work was invoked, and one future per
    // stream that the test completes to let the work finish.
    ConcurrentHashMap<String, CompletableFuture<Void>> streamWorkLatch = new ConcurrentHashMap<>();
    ConcurrentHashMap<String, CompletableFuture<Void>> streamWorkFuture = new ConcurrentHashMap<>();
    streamWorkFuture.put(stream1, new CompletableFuture<>());
    streamWorkFuture.put(stream2, new CompletableFuture<>());
    streamWorkFuture.put(stream3, new CompletableFuture<>());
    streamWorkFuture.put(stream4, new CompletableFuture<>());
    streamWorkLatch.put(stream1, new CompletableFuture<>());
    streamWorkLatch.put(stream2, new CompletableFuture<>());
    streamWorkLatch.put(stream3, new CompletableFuture<>());
    streamWorkLatch.put(stream4, new CompletableFuture<>());
    BucketWork bucketWork = x -> {
        // indicate that the work has been called
        streamWorkLatch.get(x.getStreamName()).complete(null);
        return streamWorkFuture.get(x.getStreamName());
    };
    BucketService service = new ZooKeeperBucketService(BucketStore.ServiceType.RetentionService, 0, (ZookeeperBucketStore) bucketStore, executor, 2, Duration.ofSeconds(2), bucketWork);
    service.startAsync();
    service.awaitRunning();
    // verify that we add a new stream and it gets added
    // send notification for stream1 and verify that it's work gets called
    BucketService.StreamNotification notification = new BucketService.StreamNotification(scope, stream1, BucketService.NotificationType.StreamAdded);
    service.notify(notification);
    AtomicBoolean added = new AtomicBoolean(false);
    // FIX: Futures.loop runs its body WHILE the condition is true; with `added` starting
    // false, the original condition `added::get` exited immediately without waiting.
    // Loop until the stream shows up in the service's known streams.
    Futures.loop(() -> !added.get(), () -> {
        added.set(service.getKnownStreams().stream().anyMatch(x -> x.getStreamName().equals(stream1)));
        return CompletableFuture.completedFuture(null);
    }, executor).join();
    // wait for stream work to be called
    streamWorkLatch.get(stream1).join();
    // complete stream 1 work
    streamWorkFuture.get(stream1).complete(null);
    Set<Stream> streams = service.getKnownStreams();
    assertEquals(streams.size(), 1);
    Stream stream1obj = streams.iterator().next();
    // now send notification to remove the stream
    notification = new BucketService.StreamNotification(scope, stream1, BucketService.NotificationType.StreamRemoved);
    service.notify(notification);
    // whenever notification loop kicks in, it should remove the stream from known streams and worker queue.
    AtomicBoolean removed = new AtomicBoolean(false);
    // FIX: same inverted condition as above — wait until the stream is gone from known streams.
    Futures.loop(() -> !removed.get(), () -> {
        removed.set(!service.getKnownStreams().contains(stream1obj));
        return CompletableFuture.completedFuture(null);
    }, executor).join();
    // the work should also be removed from worker queue.
    // FIX: reset the flag before reusing it, then wait until the queue no longer holds stream1.
    removed.set(false);
    Futures.loop(() -> !removed.get(), () -> {
        removed.set(service.getWorkerQueue().stream().noneMatch(x -> x.getStream().equals(stream1obj)));
        return CompletableFuture.completedFuture(null);
    }, executor).join();
    // send notification for addition of stream2, stream3
    notification = new BucketService.StreamNotification(scope, stream2, BucketService.NotificationType.StreamAdded);
    service.notify(notification);
    notification = new BucketService.StreamNotification(scope, stream3, BucketService.NotificationType.StreamAdded);
    service.notify(notification);
    // wait for two "work" to be called.. wait on those latches to complete
    streamWorkLatch.get(stream2).join();
    streamWorkLatch.get(stream3).join();
    assertEquals(service.getWorkerQueue().size(), 0);
    // send notification for addition of stream4
    notification = new BucketService.StreamNotification(scope, stream4, BucketService.NotificationType.StreamAdded);
    service.notify(notification);
    // its work should not get called: both concurrency slots are occupied by stream2/stream3.
    Collection<BucketService.QueueElement> workerQueueAfterDelay = Futures.delayedFuture(() -> CompletableFuture.completedFuture(service.getWorkerQueue()), 5000, executor).join();
    assertFalse(streamWorkLatch.get(stream4).isDone());
    // worker queue should still have the element for stream4 waiting
    assertEquals(workerQueueAfterDelay.size(), 1);
    assertEquals(workerQueueAfterDelay.iterator().next().getStream().getStreamName(), stream4);
    // finish one of the work and we shall have worker queue pick up work from stream4
    streamWorkFuture.get(stream2).complete(null);
    // stream 4's work should be called and completed
    streamWorkLatch.get(stream4).join();
}
Also used : CuratorFrameworkFactory(org.apache.curator.framework.CuratorFrameworkFactory) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) SegmentHelper(io.pravega.controller.server.SegmentHelper) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Cleanup(lombok.Cleanup) CompletableFuture(java.util.concurrent.CompletableFuture) BucketStore(io.pravega.controller.store.stream.BucketStore) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) TestingServerStarter(io.pravega.test.common.TestingServerStarter) Stream(io.pravega.client.stream.Stream) ZookeeperBucketStore(io.pravega.controller.store.stream.ZookeeperBucketStore) Duration(java.time.Duration) Map(java.util.Map) After(org.junit.After) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingServer(org.apache.curator.test.TestingServer) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) RetryHelper(io.pravega.controller.util.RetryHelper) Before(org.junit.Before) SegmentHelperMock(io.pravega.controller.mocks.SegmentHelperMock) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) RequestTracker(io.pravega.common.tracing.RequestTracker) UUID(java.util.UUID) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) CuratorFramework(org.apache.curator.framework.CuratorFramework) Assert.assertFalse(org.junit.Assert.assertFalse) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) Futures(io.pravega.common.concurrent.Futures) Assert.assertEquals(org.junit.Assert.assertEquals) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) 
AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) Stream(io.pravega.client.stream.Stream) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Test(org.junit.Test)

Example 19 with BucketStore

use of io.pravega.controller.store.stream.BucketStore in project pravega by pravega.

The following example shows the testOwnershipOfExistingBucket method of the ZkStoreBucketServiceTest class.

@Test(timeout = 60000)
public void testOwnershipOfExistingBucket() throws Exception {
    // Verifies that a freshly started retention bucket service takes ownership of buckets
    // that already contain streams (registered before the service started).
    RequestTracker requestTracker = new RequestTracker(true);
    // FIX: release every resource via @Cleanup (the pattern this method already used for the
    // executor) so nothing leaks when an assertion or the test timeout fires mid-test; the
    // original closed them manually at the end, leaking on any early exit.
    // Note: TestingServerStarter.start() returns an already-started server, so the original
    // extra zkServer2.start() call was redundant and is dropped.
    @Cleanup
    TestingServer zkServer2 = new TestingServerStarter().start();
    @Cleanup
    CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer2.getConnectString(), 10000, 1000, (r, e, s) -> false);
    zkClient2.start();
    @Cleanup("shutdownNow")
    ScheduledExecutorService executor2 = ExecutorServiceHelpers.newScheduledThreadPool(10, "test");
    String hostId = UUID.randomUUID().toString();
    BucketStore bucketStore2 = StreamStoreFactory.createZKBucketStore(ImmutableMap.of(BucketStore.ServiceType.RetentionService, 1), zkClient2, executor2);
    StreamMetadataStore streamMetadataStore2 = StreamStoreFactory.createZKStore(zkClient2, executor2);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor2);
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    @Cleanup
    StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamMetadataStore2, bucketStore2, taskMetadataStore, segmentHelper, executor2, hostId, GrpcAuthHelper.getDisabledAuthHelper());
    // Register two streams in the bucket store BEFORE the service starts.
    String scope = "scope1";
    String streamName = "stream1";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor2).join();
    String scope2 = "scope2";
    String streamName2 = "stream2";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope2, streamName2, executor2).join();
    BucketServiceFactory bucketStoreFactory = new BucketServiceFactory(hostId, bucketStore2, 5);
    BucketManager service2 = bucketStoreFactory.createRetentionService(Duration.ofMillis(5000), stream -> CompletableFuture.completedFuture(null), executor2);
    service2.startAsync();
    service2.awaitRunning();
    // NOTE(review): fixed sleep gives the service time to claim ownership of the pre-existing
    // buckets — consider polling with a timeout to make this less flaky/slow.
    Thread.sleep(10000);
    assertTrue(service2.getBucketServices().values().stream().allMatch(x -> x.getKnownStreams().size() == 2));
    service2.stopAsync();
    service2.awaitTerminated();
}
Also used : TestingServer(org.apache.curator.test.TestingServer) CuratorFrameworkFactory(org.apache.curator.framework.CuratorFrameworkFactory) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) SegmentHelper(io.pravega.controller.server.SegmentHelper) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Cleanup(lombok.Cleanup) CompletableFuture(java.util.concurrent.CompletableFuture) BucketStore(io.pravega.controller.store.stream.BucketStore) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) TestingServerStarter(io.pravega.test.common.TestingServerStarter) Stream(io.pravega.client.stream.Stream) ZookeeperBucketStore(io.pravega.controller.store.stream.ZookeeperBucketStore) Duration(java.time.Duration) Map(java.util.Map) After(org.junit.After) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingServer(org.apache.curator.test.TestingServer) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) RetryHelper(io.pravega.controller.util.RetryHelper) Before(org.junit.Before) SegmentHelperMock(io.pravega.controller.mocks.SegmentHelperMock) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) RequestTracker(io.pravega.common.tracing.RequestTracker) UUID(java.util.UUID) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) CuratorFramework(org.apache.curator.framework.CuratorFramework) Assert.assertFalse(org.junit.Assert.assertFalse) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) Futures(io.pravega.common.concurrent.Futures) Assert.assertEquals(org.junit.Assert.assertEquals) 
GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingServerStarter(io.pravega.test.common.TestingServerStarter) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) RequestTracker(io.pravega.common.tracing.RequestTracker) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) SegmentHelper(io.pravega.controller.server.SegmentHelper) Cleanup(lombok.Cleanup) CuratorFramework(org.apache.curator.framework.CuratorFramework) BucketStore(io.pravega.controller.store.stream.BucketStore) ZookeeperBucketStore(io.pravega.controller.store.stream.ZookeeperBucketStore) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) Test(org.junit.Test)

Example 20 with BucketStore

use of io.pravega.controller.store.stream.BucketStore in project pravega by pravega.

The following example shows the testWriterTimeout method of the WatermarkWorkflowTest class.

@Test(timeout = 30000L)
public void testWriterTimeout() {
    // Verifies that writers which stop reporting marks are timed out (per the stream's
    // timestampAggregationTimeout), excluded from watermarks, and that the stream is
    // eventually removed from the watermarking bucket when all writers are gone.
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    ConcurrentHashMap<String, MockRevisionedStreamClient> revisionedStreamClientMap = new ConcurrentHashMap<>();
    doAnswer(x -> {
        String streamName = x.getArgument(0);
        // FIX: the original compute(...) with an explicit null check is exactly
        // computeIfAbsent — same atomicity guarantee, clearer intent.
        return revisionedStreamClientMap.computeIfAbsent(streamName, s -> new MockRevisionedStreamClient());
    }).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    // Spy the stores so writer-removal and bucket-removal calls can be verified.
    StreamMetadataStore streamMetadataStoreSpied = spy(this.streamMetadataStore);
    BucketStore bucketStoreSpied = spy(this.bucketStore);
    @Cleanup
    PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStoreSpied, bucketStoreSpied, sp -> clientFactory, executor, new RequestTracker(false));
    // 1. create an active stream with 3 segments and a 3-second writer timeout
    String streamName = "stream";
    String scope = "scope";
    streamMetadataStoreSpied.createScope(scope, null, executor).join();
    streamMetadataStoreSpied.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).timestampAggregationTimeout(3000L).build(), System.currentTimeMillis(), null, executor).join();
    streamMetadataStoreSpied.setState(scope, streamName, State.ACTIVE, null, executor).join();
    // 2. note writer1, writer2, writer3 marks
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2,
    // writer 3 reports segment 0, 2
    String writer1 = "writer1";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 102L, ImmutableMap.of(0L, 100L, 1L, 0L, 2L, 0L), null, executor).join();
    String writer2 = "writer2";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 101L, ImmutableMap.of(0L, 0L, 1L, 100L, 2L, 0L), null, executor).join();
    String writer3 = "writer3";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 100L, ImmutableMap.of(0L, 0L, 1L, 0L, 2L, 100L), null, executor).join();
    // 3. run watermarking workflow.
    StreamImpl stream = new StreamImpl(scope, streamName);
    periodicWatermarking.watermark(stream).join();
    // verify that a watermark has been emitted.
    MockRevisionedStreamClient revisionedClient = revisionedStreamClientMap.get(NameUtils.getMarkStreamForStream(streamName));
    assertEquals(revisionedClient.watermarks.size(), 1);
    // Don't report time from writer3
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 200L, ImmutableMap.of(0L, 200L, 1L, 0L, 2L, 0L), null, executor).join();
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 200L, ImmutableMap.of(0L, 0L, 1L, 200L, 2L, 0L), null, executor).join();
    // no new watermark should be emitted, writers should be tracked for inactivity
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 1);
    verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // call again. Still no new watermark should be emitted as writers have not timed out
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 1);
    verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // call watermark after a delay of 5 more seconds. The writer3 should timeout because it has a timeout of 3 seconds.
    Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
    verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // watermark should be emitted. without considering writer3
    assertEquals(revisionedClient.watermarks.size(), 2);
    Watermark watermark = revisionedClient.watermarks.get(1).getValue();
    assertEquals(watermark.getLowerTimeBound(), 200L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 100L);
    // call watermark workflow again so that both writers are tracked for inactivity
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 2);
    verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // now introduce more delays and see all writers are removed and stream is discontinued from watermarking computation.
    Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
    // verify that stream is discontinued from tracking for watermarking
    verify(streamMetadataStoreSpied, times(3)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, times(1)).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // call note time for writer3 and verify that watermark is emitted.
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 300L, ImmutableMap.of(0L, 300L, 1L, 0L, 2L, 0L), null, executor).join();
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    watermark = revisionedClient.watermarks.get(2).getValue();
    assertEquals(watermark.getLowerTimeBound(), 300L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 300L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 100L);
}
Also used : SynchronizerClientFactory(io.pravega.client.SynchronizerClientFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) BucketStore(io.pravega.controller.store.stream.BucketStore) RequestTracker(io.pravega.common.tracing.RequestTracker) Cleanup(lombok.Cleanup) Watermark(io.pravega.shared.watermarks.Watermark) Test(org.junit.Test)

Aggregations

BucketStore (io.pravega.controller.store.stream.BucketStore)26 TableMetadataTasks (io.pravega.controller.task.KeyValueTable.TableMetadataTasks)19 StreamMetadataTasks (io.pravega.controller.task.Stream.StreamMetadataTasks)18 StreamMetadataStore (io.pravega.controller.store.stream.StreamMetadataStore)17 StreamTransactionMetadataTasks (io.pravega.controller.task.Stream.StreamTransactionMetadataTasks)16 ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService)15 Test (org.junit.Test)15 Before (org.junit.Before)14 GrpcAuthHelper (io.pravega.controller.server.security.auth.GrpcAuthHelper)13 KVTableMetadataStore (io.pravega.controller.store.kvtable.KVTableMetadataStore)13 TaskMetadataStore (io.pravega.controller.store.task.TaskMetadataStore)13 CompletableFuture (java.util.concurrent.CompletableFuture)13 RequestTracker (io.pravega.common.tracing.RequestTracker)12 Cleanup (lombok.Cleanup)12 CuratorFramework (org.apache.curator.framework.CuratorFramework)11 ExecutorServiceHelpers (io.pravega.common.concurrent.ExecutorServiceHelpers)10 AutoScaleTask (io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask)9 CreateReaderGroupTask (io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask)9 DeleteReaderGroupTask (io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask)9 DeleteScopeTask (io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask)9