Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class ControllerServiceTest, method testDeleteScope.
@Test
public void testDeleteScope() {
    String testScope = "testScope";
    streamStore.createScope(testScope, null, executor).join();
    StreamMetadataTasks streamMetadataTasks1 = mock(StreamMetadataTasks.class);
    doAnswer(invocation -> {
        CompletableFuture<Controller.DeleteScopeStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.DeleteScopeStatus.Status.SUCCESS);
        return future;
    }).when(streamMetadataTasks1).deleteScopeRecursive(testScope, 123L);
    StreamMetrics.initialize();
    CompletableFuture<Controller.DeleteScopeStatus> future = consumer.deleteScopeRecursive(testScope, 123L);
    future.join();
}
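The doAnswer block above constructs and completes a future by hand. For a stub that always yields an already-completed future, Mockito's when(...).thenReturn(...) combined with CompletableFuture.completedFuture is an equivalent, more concise form; a minimal sketch using the same mock and status enum as above:

    // Equivalent stub: return an already-completed future in a single call.
    when(streamMetadataTasks1.deleteScopeRecursive(testScope, 123L))
            .thenReturn(CompletableFuture.completedFuture(Controller.DeleteScopeStatus.Status.SUCCESS));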
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class EventProcessorHealthContributorTest, method setup.
@SneakyThrows
@Before
public void setup() {
    Host host = mock(Host.class);
    LocalController localController = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    ScheduledExecutorService executor = mock(ScheduledExecutorService.class);
    KVTableMetadataStore kvtMetadataStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtMetadataTasks = mock(TableMetadataTasks.class);
    EventProcessorSystem system = mock(EventProcessorSystemImpl.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorGroup<ControllerEvent> processor = getProcessor();
    EventProcessorGroup<ControllerEvent> mockProcessor = spy(processor);
    doThrow(new CheckpointStoreException("host not found")).when(mockProcessor).notifyProcessFailure("host3");
    when(system.createEventProcessorGroup(any(), any(), any())).thenReturn(mockProcessor);
    eventProcessors = spy(new ControllerEventProcessors(host.getHostId(), config, localController, checkpointStore,
            streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks,
            kvtMetadataStore, kvtMetadataTasks, system, executorService()));
    doReturn(true).when(eventProcessors).isReady();
    contributor = new EventProcessorHealthContributor("eventprocessors", eventProcessors);
    builder = Health.builder().name("eventprocessors");
}
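A sketch of how a test might exercise the contributor and builder created in this setup. The doHealthCheck method and the Status.UP constant are assumptions drawn from Pravega's shared health framework; neither appears in the snippet above:

    @Test
    public void healthCheck() throws Exception {
        // isReady() is stubbed to true in setup, so the contributor should report healthy.
        Status status = contributor.doHealthCheck(builder);   // assumed API
        Assert.assertEquals(Status.UP, status);               // assumed constant
    }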
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class LocalControllerTest, method testCreateReaderGroup.
@Test(timeout = 10000)
public void testCreateReaderGroup() throws ExecutionException, InterruptedException {
    final Segment seg0 = new Segment("scope", "stream1", 0L);
    final Segment seg1 = new Segment("scope", "stream1", 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 10L, seg1, 10L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of("scope", "stream1"),
            new StreamCutImpl(Stream.of("scope", "stream1"), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 200L, seg1, 300L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of("scope", "stream1"),
            new StreamCutImpl(Stream.of("scope", "stream1"), endStreamCut));
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startSC)
            .endingStreamCuts(endSC)
            .build();
    final ReaderGroupConfig config = ReaderGroupConfig.cloneConfig(rgConfig, UUID.randomUUID(), 0L);
    StreamMetadataTasks mockStreamMetaTasks = mock(StreamMetadataTasks.class);
    final String scope = "scope";
    final String rgName = "subscriber";
    when(this.mockControllerService.getStreamMetadataTasks()).thenReturn(mockStreamMetaTasks);
    Controller.ReaderGroupConfiguration expectedConfig = ModelHelper.decode(scope, rgName, config);
    when(mockStreamMetaTasks.createReaderGroupInternal(anyString(), any(), any(), anyLong(), anyLong()))
            .thenReturn(CompletableFuture.completedFuture(Controller.CreateReaderGroupResponse.newBuilder()
                    .setConfig(expectedConfig)
                    .setStatus(Controller.CreateReaderGroupResponse.Status.SUCCESS)
                    .build()));
    ReaderGroupConfig responseCfg = this.testController.createReaderGroup(scope, rgName, config).join();
    Assert.assertEquals(UUID.fromString(expectedConfig.getReaderGroupId()), responseCfg.getReaderGroupId());
    Assert.assertEquals(expectedConfig.getRetentionType(), responseCfg.getRetentionType().ordinal());
    Assert.assertEquals(expectedConfig.getGeneration(), responseCfg.getGeneration());
    Assert.assertEquals(expectedConfig.getGroupRefreshTimeMillis(), responseCfg.getGroupRefreshTimeMillis());
    Assert.assertEquals(expectedConfig.getAutomaticCheckpointIntervalMillis(), responseCfg.getAutomaticCheckpointIntervalMillis());
    Assert.assertEquals(expectedConfig.getMaxOutstandingCheckpointRequest(), responseCfg.getMaxOutstandingCheckpointRequest());
    when(mockStreamMetaTasks.createReaderGroupInternal(anyString(), any(), any(), anyLong(), anyLong()))
            .thenReturn(CompletableFuture.completedFuture(Controller.CreateReaderGroupResponse.newBuilder()
                    .setConfig(expectedConfig)
                    .setStatus(Controller.CreateReaderGroupResponse.Status.FAILURE)
                    .build()));
    assertThrows("Expected ControllerFailureException",
            () -> this.testController.createReaderGroup("scope", "subscriber", config).join(),
            ex -> ex instanceof ControllerFailureException);
    when(mockStreamMetaTasks.createReaderGroupInternal(anyString(), any(), any(), anyLong(), anyLong()))
            .thenReturn(CompletableFuture.completedFuture(Controller.CreateReaderGroupResponse.newBuilder()
                    .setConfig(expectedConfig)
                    .setStatus(Controller.CreateReaderGroupResponse.Status.INVALID_RG_NAME)
                    .build()));
    assertThrows("Expected IllegalArgumentException",
            () -> this.testController.createReaderGroup("scope", "subscriber", config).join(),
            ex -> ex instanceof IllegalArgumentException);
    when(mockStreamMetaTasks.createReaderGroupInternal(anyString(), any(), any(), anyLong(), anyLong()))
            .thenReturn(CompletableFuture.completedFuture(Controller.CreateReaderGroupResponse.newBuilder()
                    .setConfig(expectedConfig)
                    .setStatus(Controller.CreateReaderGroupResponse.Status.SCOPE_NOT_FOUND)
                    .build()));
    assertThrows("Expected IllegalArgumentException",
            () -> this.testController.createReaderGroup("scope", "subscriber", config).join(),
            ex -> ex instanceof IllegalArgumentException);
    when(mockStreamMetaTasks.createReaderGroupInternal(anyString(), any(), any(), anyLong(), anyLong()))
            .thenReturn(CompletableFuture.completedFuture(Controller.CreateReaderGroupResponse.newBuilder()
                    .setStatusValue(-1)
                    .build()));
    assertThrows("Expected ControllerFailureException",
            () -> this.testController.createReaderGroup("scope", "subscriber", config).join(),
            ex -> ex instanceof ControllerFailureException);
}
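The test above re-stubs createReaderGroupInternal before each scenario. Mockito also accepts consecutive return values in a single thenReturn(...), so the responses could be queued once; a minimal sketch, where successResponse, failureResponse, invalidNameResponse, and scopeNotFoundResponse are hypothetical locals holding the response objects built above:

    // Hypothetical consolidation: each invocation of the stub returns the next queued response.
    when(mockStreamMetaTasks.createReaderGroupInternal(anyString(), any(), any(), anyLong(), anyLong()))
            .thenReturn(CompletableFuture.completedFuture(successResponse),
                    CompletableFuture.completedFuture(failureResponse),
                    CompletableFuture.completedFuture(invalidNameResponse),
                    CompletableFuture.completedFuture(scopeNotFoundResponse));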
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class ScaleRequestHandlerTest, method testScaleRequestWithMinimumSegment.
@Test(timeout = 30000)
public void testScaleRequestWithMinimumSegment() throws ExecutionException, InterruptedException {
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler,
            null, null, null, null, null, null, null, streamStore, null, executor);
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    String stream = "mystream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 5)).build();
    streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
    // change the stream configuration to a minimum segment count of 4
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 4)).build();
    streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStore.getConfigurationRecord(scope, stream, null, executor).join();
    streamStore.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    // process the first auto scale-down event. It should only mark the segment as cold.
    multiplexer.process(new AutoScaleEvent(scope, stream, 1L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(writer.queue.isEmpty());
    assertTrue(streamStore.isCold(scope, stream, 1L, null, executor).join());
    // process the second auto scale-down event. Since it is not for an immediate neighbour, it should also only mark the segment as cold.
    multiplexer.process(new AutoScaleEvent(scope, stream, 3L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 3L, null, executor).join());
    // no scale event should be posted
    assertTrue(writer.queue.isEmpty());
    // process the third auto scale-down event. This should post a scale op event to merge segments 0 and 1.
    multiplexer.process(new AutoScaleEvent(scope, stream, 0L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 0L, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    ControllerEvent event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleDownEvent1 = (ScaleOpEvent) event;
    assertEquals(1, scaleDownEvent1.getNewRanges().size());
    assertEquals(2, scaleDownEvent1.getSegmentsToSeal().size());
    assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(0L));
    assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(1L));
    // process the fourth auto scale-down event. This should post a scale op event to merge segments 3 and 4.
    multiplexer.process(new AutoScaleEvent(scope, stream, 4L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 4L, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleDownEvent2 = (ScaleOpEvent) event;
    assertEquals(1, scaleDownEvent2.getNewRanges().size());
    assertEquals(2, scaleDownEvent2.getSegmentsToSeal().size());
    assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(3L));
    assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(4L));
    // process the first scale op event; this should submit the scale and bring the stream down to 4 segments
    multiplexer.process(scaleDownEvent1, () -> false).join();
    EpochRecord activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    List<StreamSegmentRecord> segments = activeEpoch.getSegments();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(4, segments.size());
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
    // process the second scale op event; the stream is already at the minimum of 4 segments, so no scale should happen
    multiplexer.process(scaleDownEvent2, () -> false).join();
    // verify that no scale has happened: the epoch and segment set are unchanged
    activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    segments = activeEpoch.getSegments();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(4, segments.size());
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
}
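The test depends on an EventWriterMock whose queue field captures every event the metadata tasks post. A minimal sketch of such a writer, assuming it implements the Pravega client's EventStreamWriter interface; this body is an illustration, not the project's actual class:

    class EventWriterMock implements EventStreamWriter<ControllerEvent> {
        final LinkedBlockingQueue<ControllerEvent> queue = new LinkedBlockingQueue<>();

        @Override
        public CompletableFuture<Void> writeEvent(ControllerEvent event) {
            // capture the posted event so the test can inspect and replay it
            queue.add(event);
            return CompletableFuture.completedFuture(null);
        }

        @Override
        public CompletableFuture<Void> writeEvent(String routingKey, ControllerEvent event) {
            return writeEvent(event);
        }

        @Override
        public CompletableFuture<Void> writeEvents(String routingKey, List<ControllerEvent> events) {
            events.forEach(queue::add);
            return CompletableFuture.completedFuture(null);
        }

        @Override
        public EventWriterConfig getConfig() {
            return EventWriterConfig.builder().build();
        }

        @Override
        public void flush() { }

        @Override
        public void noteTime(long timestamp) { }

        @Override
        public void close() { }
    }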
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class ScaleRequestHandlerTest, method setup.
@Before
public void setup() throws Exception {
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(20, 1, 50));
    zkClient.start();
    String hostId;
    try {
        // On each controller process restart, it gets a fresh hostId,
        // which is a combination of the hostname and a random GUID.
        hostId = InetAddress.getLocalHost().getHostAddress() + UUID.randomUUID().toString();
    } catch (UnknownHostException e) {
        hostId = UUID.randomUUID().toString();
    }
    streamStore = spy(getStore());
    bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executor);
    taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    clientFactory = mock(EventStreamClientFactory.class);
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            executor, hostId, GrpcAuthHelper.getDisabledAuthHelper());
    streamMetadataTasks.initializeStreamWriters(clientFactory, Config.SCALE_STREAM_NAME);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executor,
            hostId, GrpcAuthHelper.getDisabledAuthHelper());
    long createTimestamp = System.currentTimeMillis();
    // create a scope and a stream
    streamStore.createScope(scope, null, executor).get();
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 3)).build();
    streamMetadataTasks.createStream(scope, stream, config, createTimestamp, 0L).get();
    // set the minimum number of segments to 1 so that scale-downs can also be tested
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStore.getConfigurationRecord(scope, stream, null, executor).join();
    streamStore.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
}
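A setup like this normally has a matching @After that releases the ZooKeeper and task resources. A minimal sketch against the fields created above; the project's actual teardown may close additional resources:

    @After
    public void tearDown() throws Exception {
        streamMetadataTasks.close();
        streamTransactionMetadataTasks.close();
        connectionFactory.close();
        zkClient.close();
        zkServer.close();
    }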