Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class EventProcessorHealthContributorTest, method setup.
@SneakyThrows
@Before
public void setup() {
    Host host = mock(Host.class);
    LocalController localController = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    ScheduledExecutorService executor = mock(ScheduledExecutorService.class);
    KVTableMetadataStore kvtMetadataStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtMetadataTasks = mock(TableMetadataTasks.class);
    EventProcessorSystem system = mock(EventProcessorSystemImpl.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorGroup<ControllerEvent> processor = getProcessor();
    EventProcessorGroup<ControllerEvent> mockProcessor = spy(processor);
    doThrow(new CheckpointStoreException("host not found")).when(mockProcessor).notifyProcessFailure("host3");
    when(system.createEventProcessorGroup(any(), any(), any())).thenReturn(mockProcessor);
    eventProcessors = spy(new ControllerEventProcessors(host.getHostId(), config, localController, checkpointStore,
            streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks,
            kvtMetadataStore, kvtMetadataTasks, system, executorService()));
    doReturn(true).when(eventProcessors).isReady();
    contributor = new EventProcessorHealthContributor("eventprocessors", eventProcessors);
    builder = Health.builder().name("eventprocessors");
}
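The setup above relies on a getProcessor() helper that this excerpt does not include. A minimal sketch of what such a helper could look like, assuming a plain Mockito mock is an acceptable stand-in for the group; the real helper in the Pravega test may build a concrete implementation instead:

// Hypothetical sketch only: a stubbed EventProcessorGroup for the spy above to wrap.
// The unchecked cast is the usual idiom for Mockito mocks of generic types.
@SuppressWarnings("unchecked")
private EventProcessorGroup<ControllerEvent> getProcessor() {
    return (EventProcessorGroup<ControllerEvent>) mock(EventProcessorGroup.class);
}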
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class ScaleRequestHandlerTest, method concurrentDistinctScaleRun.
// Concurrent run of scale 1 intermixed with scale 2.
private void concurrentDistinctScaleRun(String stream, String funcToWaitOn, boolean isManual,
                                        Predicate<Throwable> firstExceptionPredicate,
                                        Map<String, Integer> invocationCount) throws Exception {
    StreamMetadataStore streamStore1 = getStore();
    StreamMetadataStore streamStore1Spied = spy(getStore());
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
    CompletableFuture<Void> wait = new CompletableFuture<>();
    CompletableFuture<Void> signal = new CompletableFuture<>();
    ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual,
            System.currentTimeMillis(), System.currentTimeMillis());
    if (isManual) {
        streamStore1.submitScale(scope, stream, Lists.newArrayList(0L),
                Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
                System.currentTimeMillis(), null, null, executor).join();
    }
    StreamMetadataStore streamStore2 = getStore();
    ScaleOperationTask scaleRequestHandler1 = new ScaleOperationTask(streamMetadataTasks, streamStore1Spied, executor);
    ScaleOperationTask scaleRequestHandler2 = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
    setMockLatch(streamStore1, streamStore1Spied, funcToWaitOn, signal, wait);
    CompletableFuture<Void> future1 = CompletableFuture.completedFuture(null)
            .thenComposeAsync(v -> scaleRequestHandler1.execute(event), executor);
    signal.join();
    // Let this run to completion; it should succeed.
    scaleRequestHandler2.execute(event).join();
    long one = NameUtils.computeSegmentId(1, 1);
    ScaleOpEvent event2 = new ScaleOpEvent(scope, stream, Lists.newArrayList(one),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual,
            System.currentTimeMillis(), System.currentTimeMillis());
    if (isManual) {
        streamStore1.submitScale(scope, stream, Lists.newArrayList(one),
                Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
                System.currentTimeMillis(), null, null, executor).join();
    }
    scaleRequestHandler2.execute(event2).join();
    // Now complete the wait latch.
    wait.complete(null);
    AssertExtensions.assertSuppliedFutureThrows("first scale should fail", () -> future1, firstExceptionPredicate);
    verify(streamStore1Spied, times(invocationCount.get("startScale"))).startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleCreateNewEpochs"))).scaleCreateNewEpochs(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleSegmentsSealed"))).scaleSegmentsSealed(anyString(), anyString(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("completeScale"))).completeScale(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("updateVersionedState"))).updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
    // Validate that the scale is done.
    VersionedMetadata<EpochTransitionRecord> versioned = streamStore1.getEpochTransition(scope, stream, null, executor).join();
    assertEquals(EpochTransitionRecord.EMPTY, versioned.getObject());
    assertEquals(4, getVersionNumber(versioned));
    assertEquals(2, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
    assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
    streamStore1.close();
    streamStore2.close();
}
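The test depends on a setMockLatch helper, not shown in this excerpt, that pauses the spied store inside the method named by funcToWaitOn. A minimal sketch of one way to build it with Mockito, shown only for the startScale case; the name-based dispatch and the argument count are assumptions inferred from the verify(...) calls above, not the actual helper from the Pravega test:

// Hypothetical sketch: when the named method is invoked on the spy, complete `signal`
// (so the test knows the call has started), block on `wait`, then delegate to the real
// store. The real helper presumably handles all of the verified method names.
private void setMockLatch(StreamMetadataStore store, StreamMetadataStore spied,
                          String funcToWaitOn, CompletableFuture<Void> signal,
                          CompletableFuture<Void> wait) {
    if ("startScale".equals(funcToWaitOn)) {
        doAnswer(invocation -> {
            signal.complete(null);
            wait.join();
            // Delegate to the unspied store with the original arguments.
            return store.startScale(invocation.getArgument(0), invocation.getArgument(1),
                    invocation.getArgument(2), invocation.getArgument(3),
                    invocation.getArgument(4), invocation.getArgument(5),
                    invocation.getArgument(6));
        }).when(spied).startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
    }
}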
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class ZkStoreBucketServiceTest, method testOwnershipOfExistingBucket.
@Test(timeout = 60000)
public void testOwnershipOfExistingBucket() throws Exception {
    RequestTracker requestTracker = new RequestTracker(true);
    TestingServer zkServer2 = new TestingServerStarter().start();
    zkServer2.start();
    CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer2.getConnectString(), 10000, 1000, (r, e, s) -> false);
    zkClient2.start();
    @Cleanup("shutdownNow")
    ScheduledExecutorService executor2 = ExecutorServiceHelpers.newScheduledThreadPool(10, "test");
    String hostId = UUID.randomUUID().toString();
    BucketStore bucketStore2 = StreamStoreFactory.createZKBucketStore(ImmutableMap.of(BucketStore.ServiceType.RetentionService, 1), zkClient2, executor2);
    StreamMetadataStore streamMetadataStore2 = StreamStoreFactory.createZKStore(zkClient2, executor2);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor2);
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamMetadataStore2, bucketStore2,
            taskMetadataStore, segmentHelper, executor2, hostId, GrpcAuthHelper.getDisabledAuthHelper());
    String scope = "scope1";
    String streamName = "stream1";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor2).join();
    String scope2 = "scope2";
    String streamName2 = "stream2";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope2, streamName2, executor2).join();
    BucketServiceFactory bucketStoreFactory = new BucketServiceFactory(hostId, bucketStore2, 5);
    BucketManager service2 = bucketStoreFactory.createRetentionService(Duration.ofMillis(5000), stream -> CompletableFuture.completedFuture(null), executor2);
    service2.startAsync();
    service2.awaitRunning();
    Thread.sleep(10000);
    assertTrue(service2.getBucketServices().values().stream().allMatch(x -> x.getKnownStreams().size() == 2));
    service2.stopAsync();
    service2.awaitTerminated();
    zkClient2.close();
    zkServer2.close();
    streamMetadataTasks2.close();
    ExecutorServiceHelpers.shutdown(executor2);
}
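The fixed Thread.sleep(10000) above waits for bucket ownership to settle, which makes the test slow and potentially flaky. A minimal alternative sketch, assuming the test is free to poll the same public API it already asserts against:

// Poll the ownership condition instead of sleeping for a fixed 10 seconds.
long deadline = System.currentTimeMillis() + 10000;
while (System.currentTimeMillis() < deadline
        && !service2.getBucketServices().values().stream()
                    .allMatch(x -> x.getKnownStreams().size() == 2)) {
    Thread.sleep(100);
}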
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class WatermarkWorkflowTest, method testWriterTimeout.
@Test(timeout = 30000L)
public void testWriterTimeout() {
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    ConcurrentHashMap<String, MockRevisionedStreamClient> revisionedStreamClientMap = new ConcurrentHashMap<>();
    doAnswer(x -> {
        String streamName = x.getArgument(0);
        return revisionedStreamClientMap.compute(streamName, (s, rsc) -> {
            if (rsc != null) {
                return rsc;
            } else {
                return new MockRevisionedStreamClient();
            }
        });
    }).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    StreamMetadataStore streamMetadataStoreSpied = spy(this.streamMetadataStore);
    BucketStore bucketStoreSpied = spy(this.bucketStore);
    @Cleanup
    PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStoreSpied,
            bucketStoreSpied, sp -> clientFactory, executor, new RequestTracker(false));
    String streamName = "stream";
    String scope = "scope";
    streamMetadataStoreSpied.createScope(scope, null, executor).join();
    streamMetadataStoreSpied.createStream(scope, streamName,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).timestampAggregationTimeout(3000L).build(),
            System.currentTimeMillis(), null, executor).join();
    streamMetadataStoreSpied.setState(scope, streamName, State.ACTIVE, null, executor).join();
    // 2. Note writer1, writer2, writer3 marks:
    // writer 1 reports segments 0, 1;
    // writer 2 reports segments 1, 2;
    // writer 3 reports segments 0, 2.
    String writer1 = "writer1";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 102L, ImmutableMap.of(0L, 100L, 1L, 0L, 2L, 0L), null, executor).join();
    String writer2 = "writer2";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 101L, ImmutableMap.of(0L, 0L, 1L, 100L, 2L, 0L), null, executor).join();
    String writer3 = "writer3";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 100L, ImmutableMap.of(0L, 0L, 1L, 0L, 2L, 100L), null, executor).join();
    // 3. Run the watermarking workflow.
    StreamImpl stream = new StreamImpl(scope, streamName);
    periodicWatermarking.watermark(stream).join();
    // Verify that a watermark has been emitted.
    MockRevisionedStreamClient revisionedClient = revisionedStreamClientMap.get(NameUtils.getMarkStreamForStream(streamName));
    assertEquals(revisionedClient.watermarks.size(), 1);
    // Don't report time from writer3.
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 200L, ImmutableMap.of(0L, 200L, 1L, 0L, 2L, 0L), null, executor).join();
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 200L, ImmutableMap.of(0L, 0L, 1L, 200L, 2L, 0L), null, executor).join();
    // No new watermark should be emitted; writers should be tracked for inactivity.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 1);
    verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // Call again. Still no new watermark should be emitted, as the writers have not timed out.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 1);
    verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // Call watermark after a delay of 5 more seconds. writer3 should time out because it has a timeout of 3 seconds.
    Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
    verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // A watermark should be emitted, without considering writer3.
    assertEquals(revisionedClient.watermarks.size(), 2);
    Watermark watermark = revisionedClient.watermarks.get(1).getValue();
    assertEquals(watermark.getLowerTimeBound(), 200L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 100L);
    // Call the watermark workflow again so that both remaining writers are tracked for inactivity.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 2);
    verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // Now introduce more delay and verify that all writers are removed and the stream is discontinued from watermarking computation.
    Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
    // Verify that the stream is discontinued from tracking for watermarking.
    verify(streamMetadataStoreSpied, times(3)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, times(1)).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // Note time for writer3 and verify that a watermark is emitted.
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 300L, ImmutableMap.of(0L, 300L, 1L, 0L, 2L, 0L), null, executor).join();
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    watermark = revisionedClient.watermarks.get(2).getValue();
    assertEquals(watermark.getLowerTimeBound(), 300L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 300L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 100L);
}
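The assertions above use a getSegmentOffset helper that the excerpt does not show. A minimal sketch under the assumption that Watermark#getStreamCut() maps each SegmentWithRange to its offset and that SegmentWithRange exposes the raw segment id; the actual helper in the Pravega test may differ:

// Hypothetical sketch: look up the offset recorded for the given segment id
// in the watermark's stream cut.
private long getSegmentOffset(Watermark watermark, long segmentId) {
    return watermark.getStreamCut().entrySet().stream()
            .filter(e -> e.getKey().getSegmentId() == segmentId)
            .findFirst()
            .map(Map.Entry::getValue)
            .orElseThrow(() -> new IllegalArgumentException("segment not found: " + segmentId));
}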
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class StreamMetadataTasksTest, method testeventHelperNPE.
@Test(timeout = 30000)
public void testeventHelperNPE() throws Exception {
    StreamMetadataStore streamMetadataStore = getStore();
    ImmutableMap<BucketStore.ServiceType, Integer> map = ImmutableMap.of(BucketStore.ServiceType.RetentionService, 1, BucketStore.ServiceType.WatermarkingService, 1);
    bucketStore = StreamStoreFactory.createInMemoryBucketStore(map);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 0.75));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.75, 1.0));
    EventHelper helper = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamMetadataStore).getHostTaskIndex());
    @Cleanup
    StreamMetadataTasks streamMetadataTasks = new StreamMetadataTasks(streamMetadataStore, bucketStore,
            taskMetadataStore, segmentHelperMock, executor, "host", new GrpcAuthHelper(authEnabled, "key", 300), helper);
    CompletableFuture<ScaleResponse> scaleResponse = streamMetadataTasks.manualScale(SCOPE, "hellow", Collections.singletonList(1L), newRanges, 30, 0L);
    if (!scaleResponse.isDone()) {
        AbstractClientFactoryImpl clientFactory = mock(AbstractClientFactoryImpl.class);
        streamMetadataTasks.initializeStreamWriters(clientFactory, "_requestStream");
    }
    assertEquals(ScaleResponse.ScaleStreamStatus.FAILURE, scaleResponse.join().getStatus());
}
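This test, like the scale test above, obtains its store from a getStore() helper that the excerpt does not include. A minimal sketch under the assumption that an in-memory store is sufficient; the exact StreamStoreFactory signature varies across Pravega versions, and concrete test subclasses may return ZooKeeper- or table-backed stores instead:

// Hypothetical sketch: the factory method and its signature are assumptions.
private StreamMetadataStore getStore() {
    return StreamStoreFactory.createInMemoryStore(executor);
}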