Use of io.pravega.controller.server.eventProcessor.LocalController in project pravega by pravega.
The class ControllerServiceStarter, method startUp.
@Override
protected void startUp() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    log.info("Initiating controller service startUp");
    log.info("Event processors enabled = {}", serviceConfig.getEventProcessorConfig().isPresent());
    log.info("Cluster listener enabled = {}", serviceConfig.isControllerClusterListenerEnabled());
    log.info(" Host monitor enabled = {}", serviceConfig.getHostMonitorConfig().isHostMonitorEnabled());
    log.info(" gRPC server enabled = {}", serviceConfig.getGRPCServerConfig().isPresent());
    log.info(" REST server enabled = {}", serviceConfig.getRestServerConfig().isPresent());
    final StreamMetadataStore streamStore;
    final TaskMetadataStore taskMetadataStore;
    final HostControllerStore hostStore;
    final CheckpointStore checkpointStore;
    try {
        // Initialize the executor service.
        controllerExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "controllerpool");
        retentionExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.RETENTION_THREAD_POOL_SIZE, "retentionpool");
        log.info("Creating the stream store");
        streamStore = StreamStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Creating the task store");
        taskMetadataStore = TaskStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Creating the host store");
        hostStore = HostStoreFactory.createStore(serviceConfig.getHostMonitorConfig(), storeClient);
        log.info("Creating the checkpoint store");
        checkpointStore = CheckpointStoreFactory.create(storeClient);

        // On each controller process restart, we use a fresh hostId,
        // which is a combination of hostname and random GUID.
        String hostName = getHostName();
        Host host = new Host(hostName, getPort(), UUID.randomUUID().toString());

        if (serviceConfig.getHostMonitorConfig().isHostMonitorEnabled()) {
            // Start the Segment Container Monitor.
            monitor = new SegmentContainerMonitor(hostStore, (CuratorFramework) storeClient.getClient(),
                    new UniformContainerBalancer(),
                    serviceConfig.getHostMonitorConfig().getHostMonitorMinRebalanceInterval());
            log.info("Starting segment container monitor");
            monitor.startAsync();
        }

        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create((serviceConfig.getGRPCServerConfig().get().isTlsEnabled() ? "tls://" : "tcp://") + "localhost"))
                .trustStore(serviceConfig.getGRPCServerConfig().get().getTlsTrustStore())
                .validateHostName(false)
                .build();
        connectionFactory = new ConnectionFactoryImpl(clientConfig);
        SegmentHelper segmentHelper = new SegmentHelper();
        streamMetadataTasks = new StreamMetadataTasks(streamStore, hostStore, taskMetadataStore, segmentHelper,
                controllerExecutor, host.getHostId(), connectionFactory,
                serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                serviceConfig.getGRPCServerConfig().get().getTokenSigningKey());
        streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelper,
                controllerExecutor, host.getHostId(), serviceConfig.getTimeoutServiceConfig(), connectionFactory,
                serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                serviceConfig.getGRPCServerConfig().get().getTokenSigningKey());
        streamCutService = new StreamCutService(Config.BUCKET_COUNT, host.getHostId(), streamStore, streamMetadataTasks, retentionExecutor);
        log.info("starting auto retention service asynchronously");
        streamCutService.startAsync();
        streamCutService.awaitRunning();

        // Controller has a mechanism to track the currently active controller host instances. On detecting a failure of
        // any controller instance, the failure detector stores the failed HostId in a failed hosts directory (FH), and
        // invokes the taskSweeper.sweepOrphanedTasks for each failed host. When all resources under the failed hostId
        // are processed and deleted, that failed HostId is removed from FH folder.
        // Moreover, on controller process startup, it detects any hostIds not in the currently active set of
        // controllers and starts sweeping tasks orphaned by those hostIds.
        TaskSweeper taskSweeper = new TaskSweeper(taskMetadataStore, host.getHostId(), controllerExecutor, streamMetadataTasks);
        TxnSweeper txnSweeper = new TxnSweeper(streamStore, streamTransactionMetadataTasks,
                serviceConfig.getTimeoutServiceConfig().getMaxLeaseValue(), controllerExecutor);

        if (serviceConfig.isControllerClusterListenerEnabled()) {
            cluster = new ClusterZKImpl((CuratorFramework) storeClient.getClient(), ClusterType.CONTROLLER);
        }

        controllerService = new ControllerService(streamStore, hostStore, streamMetadataTasks,
                streamTransactionMetadataTasks, new SegmentHelper(), controllerExecutor, cluster);

        // Setup event processors.
        setController(new LocalController(controllerService,
                serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                serviceConfig.getGRPCServerConfig().get().getTokenSigningKey()));

        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // Create ControllerEventProcessor object.
            controllerEventProcessors = new ControllerEventProcessors(host.getHostId(),
                    serviceConfig.getEventProcessorConfig().get(), localController, checkpointStore, streamStore,
                    hostStore, segmentHelper, connectionFactory, streamMetadataTasks, controllerExecutor);
            // Bootstrap and start it asynchronously.
            log.info("Starting event processors");
            controllerEventProcessors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks)
                    .thenAcceptAsync(x -> controllerEventProcessors.startAsync(), controllerExecutor);
        }

        // Setup and start controller cluster listener after all sweepers have been initialized.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            List<FailoverSweeper> failoverSweepers = new ArrayList<>();
            failoverSweepers.add(taskSweeper);
            failoverSweepers.add(txnSweeper);
            if (serviceConfig.getEventProcessorConfig().isPresent()) {
                assert controllerEventProcessors != null;
                failoverSweepers.add(controllerEventProcessors);
            }
            controllerClusterListener = new ControllerClusterListener(host, cluster, controllerExecutor, failoverSweepers);
            log.info("Starting controller cluster listener");
            controllerClusterListener.startAsync();
        }

        // Start RPC server.
        if (serviceConfig.getGRPCServerConfig().isPresent()) {
            grpcServer = new GRPCServer(controllerService, serviceConfig.getGRPCServerConfig().get());
            grpcServer.startAsync();
            log.info("Awaiting start of rpc server");
            grpcServer.awaitRunning();
        }

        // Start REST server.
        if (serviceConfig.getRestServerConfig().isPresent()) {
            restServer = new RESTServer(this.localController, controllerService, grpcServer.getPravegaAuthManager(),
                    serviceConfig.getRestServerConfig().get(), connectionFactory);
            restServer.startAsync();
            log.info("Awaiting start of REST server");
            restServer.awaitRunning();
        }

        // Wait for controller event processors to start.
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            log.info("Awaiting start of controller event processors");
            controllerEventProcessors.awaitRunning();
        }

        // Wait for controller cluster listeners to start.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            log.info("Awaiting start of controller cluster listener");
            controllerClusterListener.awaitRunning();
        }
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}
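The startUp sequence above is also where LocalController enters the picture: it adapts the fully wired, in-process ControllerService to the client-facing Controller interface, so event processors and the REST server can issue controller calls without a gRPC round trip. Below is a minimal sketch of that wiring, assuming only the constructor signatures already shown on this page; the Mockito mock stands in for the fully constructed ControllerService, the scope name "myScope" is an arbitrary placeholder, and the import paths are approximations for this Pravega version.

import static org.mockito.Mockito.mock;

import java.net.URI;
import io.pravega.client.ClientConfig;
import io.pravega.client.netty.impl.ConnectionFactoryImpl;
import io.pravega.client.stream.impl.ClientFactoryImpl;
import io.pravega.controller.server.ControllerService;
import io.pravega.controller.server.eventProcessor.LocalController;

// A mocked ControllerService stands in for the instance assembled in startUp();
// authorization is disabled and the token signing key is empty, as in the test below.
ControllerService service = mock(ControllerService.class);
LocalController controller = new LocalController(service, false, "");

// The in-process controller is handed to client-side components exactly as a
// remote controller stub would be.
ConnectionFactoryImpl connectionFactory = new ConnectionFactoryImpl(
        ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
ClientFactoryImpl clientFactory = new ClientFactoryImpl("myScope", controller, connectionFactory);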
Use of io.pravega.controller.server.eventProcessor.LocalController in project pravega by pravega.
The class StreamMetaDataTests, method setup.
@Before
public void setup() {
    mockControllerService = mock(ControllerService.class);
    serverConfig = RESTServerConfigImpl.builder().host("localhost").port(TestUtils.getAvailableListenPort()).build();
    LocalController controller = new LocalController(mockControllerService, false, "");
    restServer = new RESTServer(controller, mockControllerService, authManager, serverConfig,
            new ConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build()));
    restServer.startAsync();
    restServer.awaitRunning();
    client = ClientBuilder.newClient();

    scalingPolicyCommon.setType(ScalingConfig.TypeEnum.BY_RATE_IN_EVENTS_PER_SEC);
    scalingPolicyCommon.setTargetRate(100);
    scalingPolicyCommon.setScaleFactor(2);
    scalingPolicyCommon.setMinSegments(2);
    scalingPolicyCommon2.setType(ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS);
    scalingPolicyCommon2.setMinSegments(2);
    retentionPolicyCommon.setType(TypeEnum.LIMITED_DAYS);
    retentionPolicyCommon.setValue(123L);
    retentionPolicyCommon2.setType(null);
    retentionPolicyCommon2.setValue(null);

    streamResponseExpected.setScopeName(scope1);
    streamResponseExpected.setStreamName(stream1);
    streamResponseExpected.setScalingPolicy(scalingPolicyCommon);
    streamResponseExpected.setRetentionPolicy(retentionPolicyCommon);
    createStreamRequest.setStreamName(stream1);
    createStreamRequest.setScalingPolicy(scalingPolicyCommon);
    createStreamRequest.setRetentionPolicy(retentionPolicyCommon);
    createStreamRequest2.setStreamName(stream1);
    createStreamRequest2.setScalingPolicy(scalingPolicyCommon);
    createStreamRequest2.setRetentionPolicy(retentionPolicyCommon2);
    createStreamRequest3.setStreamName(stream1);
    createStreamRequest3.setScalingPolicy(scalingPolicyCommon);
    createStreamRequest3.setRetentionPolicy(retentionPolicyCommon);
    createStreamRequest4.setStreamName(stream3);
    createStreamRequest4.setScalingPolicy(scalingPolicyCommon);
    // Stream 4, where targetRate and scaleFactor in the scaling policy are null.
    createStreamRequest5.setStreamName(stream4);
    createStreamRequest5.setScalingPolicy(scalingPolicyCommon2);
    createStreamRequest5.setRetentionPolicy(retentionPolicyCommon);
    streamResponseExpected2.setScopeName(scope1);
    streamResponseExpected2.setStreamName(stream3);
    streamResponseExpected2.setScalingPolicy(scalingPolicyCommon);
    streamResponseExpected3.setScopeName(scope1);
    streamResponseExpected3.setStreamName(stream4);
    streamResponseExpected3.setScalingPolicy(scalingPolicyCommon2);
    streamResponseExpected3.setRetentionPolicy(retentionPolicyCommon);
    updateStreamRequest.setScalingPolicy(scalingPolicyCommon);
    updateStreamRequest.setRetentionPolicy(retentionPolicyCommon);
    updateStreamRequest2.setScalingPolicy(scalingPolicyCommon);
    updateStreamRequest2.setRetentionPolicy(retentionPolicyCommon);
    updateStreamRequest3.setScalingPolicy(scalingPolicyCommon);
    updateStreamRequest3.setRetentionPolicy(retentionPolicyCommon2);
}
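A test built on this setup would then drive the REST endpoint through the JAX-RS client created above. The following is a hedged sketch: the /v1/scopes/{scope}/streams path and the serverConfig.getPort() accessor are assumptions about Pravega's REST API and config class rather than something taken from this snippet, and mockControllerService would need its createStream call stubbed for the request to succeed.

import javax.ws.rs.client.Entity;
import javax.ws.rs.core.Response;

// Hypothetical POST against the stream-creation resource; with the mock
// stubbed to report success, the handler would answer 201 Created.
String streamResourceURI = "http://localhost:" + serverConfig.getPort() + "/v1/scopes/" + scope1 + "/streams";
Response response = client.target(streamResourceURI)
        .request()
        .buildPost(Entity.json(createStreamRequest))
        .invoke();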
Use of io.pravega.controller.server.eventProcessor.LocalController in project pravega by pravega.
The class EndToEndTruncationTest, method testTruncation.
@Test(timeout = 30000)
public void testTruncation() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scope("test").streamName("test")
            .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
    LocalController controller = (LocalController) controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream(config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    @Cleanup
    ClientFactory clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter("test", new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    writer.writeEvent("0", "truncationTest1").get();

    // Scale the stream: seal the two initial segments (0 and 1) into three new ranges.
    Stream stream = new StreamImpl("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result = controller.scaleStream(stream, Lists.newArrayList(0, 1), map, executor).getFuture().get();
    assertTrue(result);
    writer.writeEvent("0", "truncationTest2").get();

    // Truncate at the start of the three post-scale segments (2, 3, 4),
    // which drops everything written before the scale.
    Map<Integer, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(2, 0L);
    streamCutPositions.put(3, 0L);
    streamCutPositions.put(4, 0L);
    controller.truncateStream(stream.getScope(), stream.getStreamName(), streamCutPositions).join();
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);
    groupManager.createReaderGroup("reader",
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "reader", new JavaSerializer<>(),
            ReaderConfig.builder().build());

    // The reader starts from the truncation point, so only the second event is visible.
    EventRead<String> event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("truncationTest2", event.getEvent());
    event = reader.readNextEvent(1000);
    assertNull(event.getEvent());

    // TODO: test more scenarios (issue #2011), e.g.:
    // 1. Get a valid stream cut with offset > 0 and validate truncation within a segment.
    // 2. Have an existing reader reading from a non-truncated segment, then truncate the segment;
    //    verify that the reader gets an appropriate response and handles it successfully.
}
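The TODO above leaves reader-side truncation handling unexercised. As a hedged sketch of scenario 2: when a reader's position falls behind the stream's truncation point, the Pravega client surfaces a TruncatedDataException, and reading again resumes from the earliest retained event. The snippet reuses the reader variable from the test above and is illustrative, not part of the test.

import io.pravega.client.stream.TruncatedDataException;

String value;
try {
    value = reader.readNextEvent(10000).getEvent();
} catch (TruncatedDataException e) {
    // Events before the stream cut are gone; the next read starts at the
    // earliest offset that is still retained.
    value = reader.readNextEvent(10000).getEvent();
}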