Example usage of io.pravega.shared.controller.event.ControllerEvent in the pravega project, taken from class ScaleRequestHandlerTest, method testScaleRequestWithMinimumSegment.
/**
 * Verifies that auto-scale-down requests respect the stream's minimum segment count:
 * the stream starts with 5 segments and min-count 4, so the first merge (segments 0, 1)
 * is allowed, but a second merge (segments 3, 4) would drop the count to 3 and must be
 * ignored by the scale operation task.
 */
@Test(timeout = 30000)
public void testScaleRequestWithMinimumSegment() throws ExecutionException, InterruptedException {
AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
EventWriterMock writer = new EventWriterMock();
streamMetadataTasks.setRequestEventWriter(writer);
String stream = "mystream";
// create the stream with 5 initial segments (numbers 0..4)
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 5)).build();
streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
// change stream configuration to min segment count = 4
config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 4)).build();
streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = streamStore.getConfigurationRecord(scope, stream, null, executor).join();
streamStore.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
// process first auto scale down event. it should only mark the segment as cold
multiplexer.process(new AutoScaleEvent(scope, stream, 1L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(writer.queue.isEmpty());
assertTrue(streamStore.isCold(scope, stream, 1L, null, executor).join());
// process second auto scale down event. since it's not for an immediate neighbour it should only mark the segment as cold
multiplexer.process(new AutoScaleEvent(scope, stream, 3L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(streamStore.isCold(scope, stream, 3L, null, executor).join());
// no scale event should be posted
assertTrue(writer.queue.isEmpty());
// process third auto scale down event. This should result in a scale op event being posted to merge segments 0, 1
multiplexer.process(new AutoScaleEvent(scope, stream, 0L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(streamStore.isCold(scope, stream, 0L, null, executor).join());
// verify that a new event has been posted
assertEquals(1, writer.queue.size());
ControllerEvent event = writer.queue.take();
assertTrue(event instanceof ScaleOpEvent);
ScaleOpEvent scaleDownEvent1 = (ScaleOpEvent) event;
assertEquals(1, scaleDownEvent1.getNewRanges().size());
assertEquals(2, scaleDownEvent1.getSegmentsToSeal().size());
assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(0L));
assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(1L));
// process fourth auto scale down event. This should result in a scale op event being posted to merge segments 3, 4
multiplexer.process(new AutoScaleEvent(scope, stream, 4L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
assertTrue(streamStore.isCold(scope, stream, 4L, null, executor).join());
// verify that a new event has been posted
assertEquals(1, writer.queue.size());
event = writer.queue.take();
assertTrue(event instanceof ScaleOpEvent);
ScaleOpEvent scaleDownEvent2 = (ScaleOpEvent) event;
assertEquals(1, scaleDownEvent2.getNewRanges().size());
assertEquals(2, scaleDownEvent2.getSegmentsToSeal().size());
assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(3L));
assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(4L));
// process first scale down event, this should submit scale and scale the stream down to 4 segments
multiplexer.process(scaleDownEvent1, () -> false).join();
EpochRecord activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
List<StreamSegmentRecord> segments = activeEpoch.getSegments();
assertEquals(1, activeEpoch.getEpoch());
assertEquals(4, segments.size());
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
// process second scale down event. Since the stream is already at the minimum segment
// count (4), this merge must be ignored and no new epoch created.
multiplexer.process(scaleDownEvent2, () -> false).join();
activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
// refresh the segment list from the re-fetched epoch; previously the stale list from the
// first fetch was re-asserted, which could not detect an erroneous second scale
segments = activeEpoch.getSegments();
// verify that no scale has happened: same epoch, same 4 segments
assertEquals(1, activeEpoch.getEpoch());
assertEquals(4, segments.size());
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
}
Example usage of io.pravega.shared.controller.event.ControllerEvent in the pravega project, taken from class ControllerEventProcessors, method initialize.
/**
 * Creates and starts the four controller event processor groups (commit, abort, stream
 * request, KVT request). Each group is created with indefinite exponential-backoff retry,
 * then this method blocks until all four groups report running.
 */
private void initialize() {
// region Create commit event processor
EventProcessorGroupConfig commitReadersConfig = EventProcessorGroupConfigImpl.builder().streamName(config.getCommitStreamName()).readerGroupName(config.getCommitReaderGroupName()).eventProcessorCount(config.getCommitReaderGroupSize()).checkpointConfig(CheckpointConfig.none()).build();
EventProcessorConfig<CommitEvent> commitConfig = EventProcessorConfig.<CommitEvent>builder().config(commitReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER).serializer(COMMIT_EVENT_SERIALIZER).supplier(() -> new ConcurrentEventProcessor<>(commitRequestHandler, executor)).minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
log.debug("Creating commit event processors");
Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY, e -> log.warn("Error creating commit event processor group", e)).run(() -> {
commitEventProcessors = system.createEventProcessorGroup(commitConfig, checkpointStore, rebalanceExecutor);
return null;
});
// endregion
// region Create abort event processor
EventProcessorGroupConfig abortReadersConfig = EventProcessorGroupConfigImpl.builder().streamName(config.getAbortStreamName()).readerGroupName(config.getAbortReaderGroupName()).eventProcessorCount(config.getAbortReaderGroupSize()).checkpointConfig(CheckpointConfig.none()).build();
EventProcessorConfig<AbortEvent> abortConfig = EventProcessorConfig.<AbortEvent>builder().config(abortReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER).serializer(ABORT_EVENT_SERIALIZER).supplier(() -> new ConcurrentEventProcessor<>(abortRequestHandler, executor)).minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
log.debug("Creating abort event processors");
// fixed copy-paste error: this retry loop creates the abort group, not the commit group
Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY, e -> log.warn("Error creating abort event processor group", e)).run(() -> {
abortEventProcessors = system.createEventProcessorGroup(abortConfig, checkpointStore, rebalanceExecutor);
return null;
});
// endregion
// region Create request event processor
EventProcessorGroupConfig requestReadersConfig = EventProcessorGroupConfigImpl.builder().streamName(config.getRequestStreamName()).readerGroupName(config.getRequestReaderGroupName()).eventProcessorCount(1).checkpointConfig(CheckpointConfig.none()).build();
EventProcessorConfig<ControllerEvent> requestConfig = EventProcessorConfig.builder().config(requestReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER).serializer(CONTROLLER_EVENT_SERIALIZER).supplier(() -> new ConcurrentEventProcessor<>(streamRequestHandler, executor)).minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
log.debug("Creating stream request event processors");
Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY, e -> log.warn("Error creating request event processor group", e)).run(() -> {
requestEventProcessors = system.createEventProcessorGroup(requestConfig, checkpointStore, rebalanceExecutor);
return null;
});
// endregion
// region Create KVtable event processor
EventProcessorGroupConfig kvtReadersConfig = EventProcessorGroupConfigImpl.builder().streamName(config.getKvtStreamName()).readerGroupName(config.getKvtReaderGroupName()).eventProcessorCount(1).checkpointConfig(CheckpointConfig.none()).build();
EventProcessorConfig<ControllerEvent> kvtRequestConfig = EventProcessorConfig.builder().config(kvtReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER).serializer(CONTROLLER_EVENT_SERIALIZER).supplier(() -> new ConcurrentEventProcessor<>(kvtRequestHandler, executor)).minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
log.debug("Creating kvt request event processors");
// fixed copy-paste error: this retry loop creates the KVT request group
Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY, e -> log.warn("Error creating kvt request event processor group", e)).run(() -> {
kvtRequestEventProcessors = system.createEventProcessorGroup(kvtRequestConfig, checkpointStore, rebalanceExecutor);
return null;
});
// endregion
// block until every group is running before declaring initialization complete
log.info("Awaiting start of event processors...");
commitEventProcessors.awaitRunning();
log.info("Commit event processor started.");
abortEventProcessors.awaitRunning();
log.info("Abort event processor started.");
requestEventProcessors.awaitRunning();
log.info("Stream request event processor started.");
kvtRequestEventProcessors.awaitRunning();
log.info("KVT request event processor started.");
}
Example usage of io.pravega.shared.controller.event.ControllerEvent in the pravega project, taken from class TableMetadataTasksTest, method testWorkflowCompletionTimeout.
/**
 * Verifies that KVTable create and delete workflows fail with a TimeoutException when the
 * completion timeout elapses before the request event is processed.
 */
@Test(timeout = 30000)
public void testWorkflowCompletionTimeout() throws Exception {
// First, create a KVTable normally so the scope contains a deletable table.
String kvtName = "kvtable2";
long createTimestamp = System.currentTimeMillis();
KeyValueTableConfiguration tableConfig = KeyValueTableConfiguration.builder().partitionCount(2).primaryKeyLength(4).secondaryKeyLength(4).build();
CompletableFuture<Controller.CreateKeyValueTableStatus.Status> createFuture = kvtMetadataTasks.createKeyValueTable(SCOPE, kvtName, tableConfig, createTimestamp, 0L);
assertTrue(Futures.await(processEvent((TableMetadataTasksTest.WriterMock) requestEventWriter)));
assertEquals(CreateKeyValueTableStatus.Status.SUCCESS, createFuture.join());
// Now build a task set whose event helper times out almost immediately (50 ms) and whose
// writer never delivers events, so workflows can never complete.
EventHelper eventHelper = new EventHelper(executor, "host", ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
eventHelper.setCompletionTimeoutMillis(50L);
EventStreamWriter<ControllerEvent> mockWriter = new WriterMock();
eventHelper.setRequestEventWriter(mockWriter);
TableMetadataTasks timeoutTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper(), eventHelper));
// Create must time out.
AssertExtensions.assertFutureThrows("create timedout", timeoutTasks.createKeyValueTable(SCOPE, kvtable1, tableConfig, createTimestamp, 0L), e -> Exceptions.unwrap(e) instanceof TimeoutException);
// Delete must time out as well.
AssertExtensions.assertFutureThrows("delete timedout", timeoutTasks.deleteKeyValueTable(SCOPE, kvtName, 0L), e -> Exceptions.unwrap(e) instanceof TimeoutException);
}
Example usage of io.pravega.shared.controller.event.ControllerEvent in the pravega project, taken from class ConcurrentEPSerializedRHTest, method testEndToEndRequestProcessingFlow.
/**
 * End-to-end flow test: posts three events to the request stream, pumps them through a
 * ConcurrentEventProcessor on a background task, and verifies that once the first event's
 * gate is released all three results complete and the shared state returns to "ACTIVE".
 */
@Test(timeout = 10000)
public void testEndToEndRequestProcessingFlow() throws InterruptedException, ExecutionException {
// flag used to terminate the background event-pumping loop at the end of the test
AtomicBoolean stop = new AtomicBoolean(false);
TestEvent1 request1 = new TestEvent1("stream", 1);
TestEvent2 request2 = new TestEvent2("stream", 2);
TestEvent3 request3 = new TestEvent3("stream", 3);
// 0. post 3 events in requeststream [e1, e2, e3]
writer.write(request1).join();
writer.write(request2).join();
writer.write(request3).join();
// process throwing retryable exception. Verify that event is written back and checkpoint has moved forward
@Cleanup("shutdownNow") ScheduledExecutorService executor = ExecutorServiceHelpers.newScheduledThreadPool(2, "test");
TestRequestHandler2 requestHandler = new TestRequestHandler2(executor);
ConcurrentEventProcessor<TestBase, TestRequestHandler2> processor = new ConcurrentEventProcessor<>(requestHandler, 1, executor, null, writer, 1, TimeUnit.SECONDS);
// background pump: take events off the request stream and feed them to the processor
// until the test sets the stop flag; the 100 ms sleep throttles the polling loop
CompletableFuture.runAsync(() -> {
while (!stop.get()) {
ControllerEvent take = Exceptions.handleInterruptedCall(() -> requestStream.take());
processor.process((TestBase) take, null);
Exceptions.handleInterrupted(() -> Thread.sleep(100));
}
});
// wait until the handler has entered phase 1 before asserting on the intermediate state
waitingForPhase1.join();
assertTrue(state.get().equals("STATE1"));
// release the first event's gate; the remaining events should then drain through
request1.future.complete(null);
assertTrue(Futures.await(result1));
assertTrue(Futures.await(result2));
assertTrue(Futures.await(result3));
// all processing done: the shared state machine must be back to ACTIVE
assertTrue(state.get().equals("ACTIVE"));
// shut down the background pump (NOTE(review): the pump may still be blocked in
// requestStream.take() at this point; @Cleanup("shutdownNow") interrupts it)
stop.set(true);
}
Aggregations