Use of io.pravega.controller.store.task.TaskMetadataStore in project pravega by pravega.
From class ControllerServiceStarter, method startUp:
@Override
protected void startUp() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    log.info("Initiating controller service startUp");
    log.info("Event processors enabled = {}", serviceConfig.getEventProcessorConfig().isPresent());
    log.info("Cluster listener enabled = {}", serviceConfig.isControllerClusterListenerEnabled());
    log.info(" Host monitor enabled = {}", serviceConfig.getHostMonitorConfig().isHostMonitorEnabled());
    log.info(" gRPC server enabled = {}", serviceConfig.getGRPCServerConfig().isPresent());
    log.info(" REST server enabled = {}", serviceConfig.getRestServerConfig().isPresent());
    final StreamMetadataStore streamStore;
    final TaskMetadataStore taskMetadataStore;
    final HostControllerStore hostStore;
    final CheckpointStore checkpointStore;
    try {
        // Initialize the executor services.
        controllerExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "controllerpool");
        retentionExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.RETENTION_THREAD_POOL_SIZE, "retentionpool");
        log.info("Creating the stream store");
        streamStore = StreamStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Creating the task store");
        taskMetadataStore = TaskStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Creating the host store");
        hostStore = HostStoreFactory.createStore(serviceConfig.getHostMonitorConfig(), storeClient);
        log.info("Creating the checkpoint store");
        checkpointStore = CheckpointStoreFactory.create(storeClient);
        // On each controller process restart, we use a fresh hostId,
        // which is a combination of the hostname and a random GUID.
        String hostName = getHostName();
        Host host = new Host(hostName, getPort(), UUID.randomUUID().toString());
        if (serviceConfig.getHostMonitorConfig().isHostMonitorEnabled()) {
            // Start the Segment Container Monitor.
            monitor = new SegmentContainerMonitor(hostStore, (CuratorFramework) storeClient.getClient(),
                    new UniformContainerBalancer(),
                    serviceConfig.getHostMonitorConfig().getHostMonitorMinRebalanceInterval());
            log.info("Starting segment container monitor");
            monitor.startAsync();
        }
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create((serviceConfig.getGRPCServerConfig().get().isTlsEnabled() ? "tls://" : "tcp://") + "localhost"))
                .trustStore(serviceConfig.getGRPCServerConfig().get().getTlsTrustStore())
                .validateHostName(false)
                .build();
        connectionFactory = new ConnectionFactoryImpl(clientConfig);
        SegmentHelper segmentHelper = new SegmentHelper();
        streamMetadataTasks = new StreamMetadataTasks(streamStore, hostStore, taskMetadataStore, segmentHelper,
                controllerExecutor, host.getHostId(), connectionFactory,
                serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                serviceConfig.getGRPCServerConfig().get().getTokenSigningKey());
        streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelper,
                controllerExecutor, host.getHostId(), serviceConfig.getTimeoutServiceConfig(), connectionFactory,
                serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                serviceConfig.getGRPCServerConfig().get().getTokenSigningKey());
        streamCutService = new StreamCutService(Config.BUCKET_COUNT, host.getHostId(), streamStore, streamMetadataTasks, retentionExecutor);
        log.info("starting auto retention service asynchronously");
        streamCutService.startAsync();
        streamCutService.awaitRunning();
        // The controller has a mechanism to track the currently active controller host instances. On detecting a
        // failure of any controller instance, the failure detector stores the failed hostId in a failed-hosts
        // directory (FH), and invokes taskSweeper.sweepOrphanedTasks for each failed host. When all resources
        // under the failed hostId are processed and deleted, that hostId is removed from the FH directory.
        // Moreover, on controller process startup, it detects any hostIds not in the currently active set of
        // controllers and starts sweeping tasks orphaned by those hostIds.
        TaskSweeper taskSweeper = new TaskSweeper(taskMetadataStore, host.getHostId(), controllerExecutor, streamMetadataTasks);
        TxnSweeper txnSweeper = new TxnSweeper(streamStore, streamTransactionMetadataTasks,
                serviceConfig.getTimeoutServiceConfig().getMaxLeaseValue(), controllerExecutor);
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            cluster = new ClusterZKImpl((CuratorFramework) storeClient.getClient(), ClusterType.CONTROLLER);
        }
        controllerService = new ControllerService(streamStore, hostStore, streamMetadataTasks,
                streamTransactionMetadataTasks, new SegmentHelper(), controllerExecutor, cluster);
        // Set up the event processors.
        setController(new LocalController(controllerService,
                serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                serviceConfig.getGRPCServerConfig().get().getTokenSigningKey()));
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // Create the ControllerEventProcessors object.
            controllerEventProcessors = new ControllerEventProcessors(host.getHostId(),
                    serviceConfig.getEventProcessorConfig().get(), localController, checkpointStore, streamStore,
                    hostStore, segmentHelper, connectionFactory, streamMetadataTasks, controllerExecutor);
            // Bootstrap and start it asynchronously.
            log.info("Starting event processors");
            controllerEventProcessors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks)
                    .thenAcceptAsync(x -> controllerEventProcessors.startAsync(), controllerExecutor);
        }
        // Set up and start the controller cluster listener after all sweepers have been initialized.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            List<FailoverSweeper> failoverSweepers = new ArrayList<>();
            failoverSweepers.add(taskSweeper);
            failoverSweepers.add(txnSweeper);
            if (serviceConfig.getEventProcessorConfig().isPresent()) {
                assert controllerEventProcessors != null;
                failoverSweepers.add(controllerEventProcessors);
            }
            controllerClusterListener = new ControllerClusterListener(host, cluster, controllerExecutor, failoverSweepers);
            log.info("Starting controller cluster listener");
            controllerClusterListener.startAsync();
        }
        // Start the gRPC server.
        if (serviceConfig.getGRPCServerConfig().isPresent()) {
            grpcServer = new GRPCServer(controllerService, serviceConfig.getGRPCServerConfig().get());
            grpcServer.startAsync();
            log.info("Awaiting start of rpc server");
            grpcServer.awaitRunning();
        }
        // Start the REST server.
        if (serviceConfig.getRestServerConfig().isPresent()) {
            restServer = new RESTServer(this.localController, controllerService, grpcServer.getPravegaAuthManager(),
                    serviceConfig.getRestServerConfig().get(), connectionFactory);
            restServer.startAsync();
            log.info("Awaiting start of REST server");
            restServer.awaitRunning();
        }
        // Wait for the controller event processors to start.
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            log.info("Awaiting start of controller event processors");
            controllerEventProcessors.awaitRunning();
        }
        // Wait for the controller cluster listener to start.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            log.info("Awaiting start of controller cluster listener");
            controllerClusterListener.awaitRunning();
        }
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}
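The failover comment inside startUp names taskSweeper.sweepOrphanedTasks as the entry point for cleaning up after a failed instance. A minimal sketch of that path, assuming a sweepOrphanedTasks(String) overload on the TaskSweeper built above (the failedHostId value is hypothetical, standing in for an entry read from the failed-hosts directory):

    // Hedged sketch, not code from ControllerServiceStarter: sweep the tasks
    // orphaned by one failed controller instance, identified by the hostId
    // that the failure detector recorded in the failed-hosts directory.
    String failedHostId = "failed-host-guid";  // hypothetical FH entry
    taskSweeper.sweepOrphanedTasks(failedHostId);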
Use of io.pravega.controller.store.task.TaskMetadataStore in project pravega by pravega.
From class IntermittentCnxnFailureTest, method setup:
@Before
public void setup() throws Exception {
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    streamStore = StreamStoreFactory.createZKStore(zkClient, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
    HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    segmentHelperMock = spy(new SegmentHelper());
    doReturn(Controller.NodeUri.newBuilder().setEndpoint("localhost").setPort(Config.SERVICE_PORT).build())
            .when(segmentHelperMock).getSegmentUri(anyString(), anyString(), anyInt(), any());
    ConnectionFactoryImpl connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, hostStore, taskMetadataStore, segmentHelperMock,
            executor, "host", connectionFactory, false, "");
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelperMock,
            executor, "host", connectionFactory, false, "");
    controllerService = new ControllerService(streamStore, hostStore, streamMetadataTasks,
            streamTransactionMetadataTasks, segmentHelperMock, executor, null);
    controllerService.createScope(SCOPE).get();
}
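A hedged usage sketch against this wiring: because every getSegmentUri call is stubbed to resolve to localhost, a stream creation issued through controllerService exercises the segment-store connection path at that endpoint. The createStream(config, timestamp) signature is assumed from this Pravega version's ControllerService, and the stream name is hypothetical:

    // Assumed API: ControllerService#createStream(StreamConfiguration, long).
    StreamConfiguration config = StreamConfiguration.builder()
            .scope(SCOPE)
            .streamName("testStream")               // hypothetical name
            .scalingPolicy(ScalingPolicy.fixed(1))
            .build();
    Controller.CreateStreamStatus.Status status =
            controllerService.createStream(config, System.currentTimeMillis()).get().getStatus();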
Use of io.pravega.controller.store.task.TaskMetadataStore in project pravega by pravega.
From class StreamCutServiceTest, method setup:
@Before
public void setup() throws Exception {
    executor = Executors.newScheduledThreadPool(10);
    hostId = UUID.randomUUID().toString();
    streamMetadataStore = createStore(3, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor);
    HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    streamMetadataTasks = new StreamMetadataTasks(streamMetadataStore, hostStore, taskMetadataStore, segmentHelper,
            executor, hostId, connectionFactory, false, "");
    service = new StreamCutService(3, hostId, streamMetadataStore, streamMetadataTasks, executor);
    service.startAsync();
    service.awaitRunning();
}
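The matching teardown is not part of this excerpt. A minimal sketch, mirroring the shutdown order that testOwnershipOfExistingBucket below uses on its own resources (the @After method name is an assumption):

    @After
    public void teardown() throws Exception {
        // Reverse of setup: stop the service first, then release its collaborators.
        service.stopAsync();
        service.awaitTerminated();
        streamMetadataTasks.close();
        connectionFactory.close();
        executor.shutdown();
    }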
Use of io.pravega.controller.store.task.TaskMetadataStore in project pravega by pravega.
From class ZkStoreRetentionTest, method testOwnershipOfExistingBucket:
@Test(timeout = 10000)
public void testOwnershipOfExistingBucket() throws Exception {
    TestingServer zkServer2 = new TestingServerStarter().start();
    zkServer2.start();
    CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer2.getConnectString(), 10000, 1000, (r, e, s) -> false);
    zkClient2.start();
    ScheduledExecutorService executor2 = Executors.newScheduledThreadPool(10);
    String hostId = UUID.randomUUID().toString();
    StreamMetadataStore streamMetadataStore2 = StreamStoreFactory.createZKStore(zkClient2, 1, executor2);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor2);
    HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    ConnectionFactoryImpl connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamMetadataStore2, hostStore,
            taskMetadataStore, segmentHelper, executor2, hostId, connectionFactory, false, "");
    String scope = "scope1";
    String streamName = "stream1";
    streamMetadataStore2.addUpdateStreamForAutoStreamCut(scope, streamName, RetentionPolicy.builder().build(), null, executor2).join();
    String scope2 = "scope2";
    String streamName2 = "stream2";
    streamMetadataStore2.addUpdateStreamForAutoStreamCut(scope2, streamName2, RetentionPolicy.builder().build(), null, executor2).join();
    StreamCutService service2 = new StreamCutService(1, hostId, streamMetadataStore2, streamMetadataTasks2, executor2);
    service2.startAsync();
    service2.awaitRunning();
    // Both streams were registered before the service started, so the single
    // bucket should take ownership of both of them on startup.
    assertTrue(service2.getBuckets().stream().allMatch(x -> x.getRetentionFutureMap().size() == 2));
    service2.stopAsync();
    service2.awaitTerminated();
    zkClient2.close();
    zkServer2.close();
    streamMetadataTasks2.close();
    connectionFactory.close();
    executor2.shutdown();
}
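The test above covers streams registered before the service starts. The complementary case would register a stream while service2 is still running, where the bucket presumably learns of the new stream through a notification rather than the startup scan. A hypothetical fragment, not in the original test, reusing the store call shown above (it would have to run before service2 is stopped):

    // Hypothetical: register a third stream while the service is running and
    // let the bucket pick it up via notification instead of the startup scan.
    streamMetadataStore2.addUpdateStreamForAutoStreamCut("scope3", "stream3",
            RetentionPolicy.builder().build(), null, executor2).join();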
Use of io.pravega.controller.store.task.TaskMetadataStore in project pravega by pravega.
From class StreamMetadataTasksTest, method setup:
@Before
public void setup() throws Exception {
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    StreamMetadataStore streamStore = StreamStoreFactory.createInMemoryStore(1, executor);
    // Create a partial mock and stub only the isTransactionOngoing call.
    streamStorePartialMock = spy(streamStore);
    doReturn(CompletableFuture.completedFuture(false))
            .when(streamStorePartialMock).isTransactionOngoing(anyString(), anyString(), any(), any());
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
    HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    streamMetadataTasks = spy(new StreamMetadataTasks(streamStorePartialMock, hostStore, taskMetadataStore,
            segmentHelperMock, executor, "host", connectionFactory, authEnabled, "key"));
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStorePartialMock, hostStore,
            segmentHelperMock, executor, "host", connectionFactory, authEnabled, "key");
    this.streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStorePartialMock, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
            new SealStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
            executor);
    consumer = new ControllerService(streamStorePartialMock, hostStore, streamMetadataTasks,
            streamTransactionMetadataTasks, segmentHelperMock, executor, null);
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scope(SCOPE).streamName(stream1).scalingPolicy(policy1).build();
    streamStorePartialMock.createScope(SCOPE).join();
    long start = System.currentTimeMillis();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration1, start, null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    // Scale the stream manually: seal segment 1 and replace it with two
    // segments covering the key ranges [0.5, 0.75) and [0.75, 1.0).
    AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Integer> sealedSegments = Collections.singletonList(1);
    StartScaleResponse response = streamStorePartialMock.startScale(SCOPE, stream1, sealedSegments,
            Arrays.asList(segment1, segment2), start + 20, false, null, executor).get();
    List<Segment> segmentsCreated = response.getSegmentsCreated();
    streamStorePartialMock.setState(SCOPE, stream1, State.SCALING, null, executor).get();
    streamStorePartialMock.scaleNewSegmentsCreated(SCOPE, stream1, sealedSegments, segmentsCreated,
            response.getActiveEpoch(), start + 20, null, executor).get();
    streamStorePartialMock.scaleSegmentsSealed(SCOPE, stream1,
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            segmentsCreated, response.getActiveEpoch(), start + 20, null, executor).get();
}
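The setup ends with the stream manually scaled: segment 1 is sealed and replaced by two segments splitting its key range. A hedged verification a test could run afterwards, assuming a getActiveSegments(scope, stream, context, executor) method on the store in this version (the same context/executor pattern the other store calls above use):

    // Not part of the original setup: segment 0 survives the scale, segment 1
    // is sealed, and two new segments replace it, so three segments are active.
    List<Segment> activeSegments = streamStorePartialMock.getActiveSegments(SCOPE, stream1, null, executor).get();
    assertEquals(3, activeSegments.size());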