Use of io.pravega.controller.server.security.auth.GrpcAuthHelper in project pravega by pravega.
The class ControllerDescribeStreamCommand, method execute.
@Override
public void execute() {
    ensureArgCount(2);
    final String scope = getArg(0);
    final String stream = getArg(1);
    try {
        @Cleanup CuratorFramework zkClient = createZKClient();
        ScheduledExecutorService executor = getCommandArgs().getState().getExecutor();
        @Cleanup ConnectionPool pool = createConnectionPool();
        // The Pravega Controller service may store metadata either in Zookeeper or in Segment Store
        // tables. We need to instantiate the correct type of metadata store based on the cluster at hand.
        StreamMetadataStore store;
        @Cleanup SegmentHelper segmentHelper = null;
        if (getCLIControllerConfig().getMetadataBackend().equals(CLIConfig.MetadataBackends.ZOOKEEPER.name())) {
            store = StreamStoreFactory.createZKStore(zkClient, executor);
        } else {
            segmentHelper = instantiateSegmentHelper(zkClient, pool);
            GrpcAuthHelper authHelper = GrpcAuthHelper.getDisabledAuthHelper();
            store = StreamStoreFactory.createPravegaTablesStore(segmentHelper, authHelper, zkClient, executor);
        }
        // Output the configuration of this Stream.
        CompletableFuture<StreamConfiguration> streamConfig = store.getConfiguration(scope, stream, null, executor);
        prettyJSONOutput("stream_config", streamConfig.join());
        // Output the state of this Stream.
        prettyJSONOutput("stream_state", store.getState(scope, stream, true, null, executor).join());
        // Output the total number of segments for this Stream.
        Set<Long> segments = store.getAllSegmentIds(scope, stream, null, executor).join();
        prettyJSONOutput("segment_count", segments.size());
        // Check whether the Stream is sealed.
        prettyJSONOutput("is_sealed", store.isSealed(scope, stream, null, executor).join());
        // Output the active epoch for this Stream.
        prettyJSONOutput("active_epoch", store.getActiveEpoch(scope, stream, null, true, executor).join());
        // Output the number of active Transactions for this Stream.
        Map<UUID, ActiveTxnRecord> activeTxn = store.getActiveTxns(scope, stream, null, executor).join();
        if (!activeTxn.isEmpty()) {
            prettyJSONOutput("active_transactions", activeTxn);
        }
        // Output the truncation point.
        prettyJSONOutput("truncation_record", store.getTruncationRecord(scope, stream, null, executor).join().getObject());
        // Output the metadata that describes all the scaling information for this Stream.
        prettyJSONOutput("scaling_info", store.getScaleMetadata(scope, stream, segments.stream().min(Long::compareTo).get(),
                segments.stream().max(Long::compareTo).get(), null, executor).join());
        // Clean up resources.
        if (segmentHelper != null) {
            segmentHelper.close();
            store.close();
        }
    } catch (Exception e) {
        System.err.println("Exception accessing the metadata store: " + e.getMessage());
    }
}
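
Taken together, the snippets on this page construct GrpcAuthHelper in one of two ways: a disabled helper for tooling and tests, or a fully configured one for the controller service itself. A minimal sketch of both paths (the signing key and TTL values are illustrative, mirroring the test code further down, not real configuration):

// No-op helper: authorization checks are skipped. Used by the CLI command above
// and by the test setups further down this page.
GrpcAuthHelper disabledHelper = GrpcAuthHelper.getDisabledAuthHelper();

// Configured helper: authorization flag, token signing key, and access-token TTL
// in seconds, as in the ControllerServiceStarter.startUp snippet below.
// "secret" and 300 are placeholder values taken from the tests.
GrpcAuthHelper enabledHelper = new GrpcAuthHelper(true, "secret", 300);

Either instance is then handed to a store factory such as StreamStoreFactory.createPravegaTablesStore, which uses it when talking to Segment Store tables.
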
Use of io.pravega.controller.server.security.auth.GrpcAuthHelper in project pravega by pravega.
The class ControllerServiceStarter, method startUp.
@Override
protected void startUp() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    log.info("Initiating controller service startUp");
    log.info("Controller serviceConfig = {}", serviceConfig.toString());
    log.info("Event processors enabled = {}", serviceConfig.getEventProcessorConfig().isPresent());
    log.info("Cluster listener enabled = {}", serviceConfig.isControllerClusterListenerEnabled());
    log.info(" Host monitor enabled = {}", serviceConfig.getHostMonitorConfig().isHostMonitorEnabled());
    log.info(" gRPC server enabled = {}", serviceConfig.getGRPCServerConfig().isPresent());
    log.info(" REST server enabled = {}", serviceConfig.getRestServerConfig().isPresent());
    final BucketStore bucketStore;
    final TaskMetadataStore taskMetadataStore;
    final HostControllerStore hostStore;
    final CheckpointStore checkpointStore;
    try {
        // Initialize the executor services.
        controllerExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "controllerpool");
        eventExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "eventprocessor");
        retentionExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.RETENTION_THREAD_POOL_SIZE, "retentionpool");
        watermarkingExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.WATERMARKING_THREAD_POOL_SIZE, "watermarkingpool");
        bucketStore = StreamStoreFactory.createBucketStore(storeClient, controllerExecutor);
        log.info("Created the bucket store.");
        taskMetadataStore = TaskStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Created the task store.");
        hostStore = HostStoreFactory.createStore(serviceConfig.getHostMonitorConfig(), storeClient);
        log.info("Created the host store.");
        checkpointStore = CheckpointStoreFactory.create(storeClient);
        log.info("Created the checkpoint store.");
        // Initialize Stream and Transaction metrics.
        StreamMetrics.initialize();
        TransactionMetrics.initialize();
        // On each controller process restart, we use a fresh hostId,
        // which is a combination of the hostname and a random GUID.
        String hostName = getHostName();
        Host host = new Host(hostName, getPort(), UUID.randomUUID().toString());
        // Create a RequestTracker instance to trace client requests end-to-end.
        GRPCServerConfig grpcServerConfig = serviceConfig.getGRPCServerConfig().get();
        RequestTracker requestTracker = new RequestTracker(grpcServerConfig.isRequestTracingEnabled());
        // Create a HealthServiceManager instance.
        healthServiceManager = new HealthServiceManager(serviceConfig.getHealthCheckFrequency());
        if (serviceConfig.getHostMonitorConfig().isHostMonitorEnabled()) {
            // Start the Segment Container Monitor.
            monitor = new SegmentContainerMonitor(hostStore, (CuratorFramework) storeClient.getClient(),
                    new UniformContainerBalancer(), serviceConfig.getHostMonitorConfig().getHostMonitorMinRebalanceInterval());
            monitor.startAsync();
            log.info("Started Segment Container Monitor service.");
            SegmentContainerMonitorHealthContributor segmentContainerMonitorHC =
                    new SegmentContainerMonitorHealthContributor("segmentContainerMonitor", monitor);
            healthServiceManager.register(segmentContainerMonitorHC);
        }
        // This client config is used by the segment store helper (SegmentHelper) to connect to the segment store.
        ClientConfig.ClientConfigBuilder clientConfigBuilder = ClientConfig.builder()
                .controllerURI(URI.create((grpcServerConfig.isTlsEnabled() ? "tls://" : "tcp://") + "localhost:" + grpcServerConfig.getPort()))
                .trustStore(grpcServerConfig.getTlsTrustStore())
                .validateHostName(false);
        Optional<Boolean> tlsEnabledForSegmentStore = BooleanUtils.extract(serviceConfig.getTlsEnabledForSegmentStore());
        if (tlsEnabledForSegmentStore.isPresent()) {
            clientConfigBuilder.enableTlsToSegmentStore(tlsEnabledForSegmentStore.get());
        }
        // Use one connection per Segment Store to conserve resources.
        ClientConfig clientConfig = clientConfigBuilder.maxConnectionsPerSegmentStore(1).build();
        connectionFactory = connectionFactoryRef.orElseGet(() -> new SocketConnectionFactoryImpl(clientConfig));
        connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
        segmentHelper = segmentHelperRef.orElseGet(() -> new SegmentHelper(connectionPool, hostStore, controllerExecutor));
        GrpcAuthHelper authHelper = new GrpcAuthHelper(serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                grpcServerConfig.getTokenSigningKey(), grpcServerConfig.getAccessTokenTTLInSeconds());
        streamStore = streamMetadataStoreRef.orElseGet(() -> StreamStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor));
        log.info("Created the stream store.");
        streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
                controllerExecutor, eventExecutor, host.getHostId(), authHelper, serviceConfig.getRetentionFrequency().toMillis());
        streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper,
                controllerExecutor, eventExecutor, host.getHostId(), serviceConfig.getTimeoutServiceConfig(), authHelper);
        BucketServiceFactory bucketServiceFactory = new BucketServiceFactory(host.getHostId(), bucketStore, 1000);
        Duration executionDurationRetention = serviceConfig.getRetentionFrequency();
        PeriodicRetention retentionWork = new PeriodicRetention(streamStore, streamMetadataTasks, retentionExecutor, requestTracker);
        retentionService = bucketServiceFactory.createRetentionService(executionDurationRetention, retentionWork::retention, retentionExecutor);
        retentionService.startAsync();
        retentionService.awaitRunning();
        log.info("Started background periodic service for Retention.");
        RetentionServiceHealthContributor retentionServiceHC = new RetentionServiceHealthContributor("retentionService", retentionService);
        healthServiceManager.register(retentionServiceHC);
        Duration executionDurationWatermarking = Duration.ofSeconds(Config.MINIMUM_WATERMARKING_FREQUENCY_IN_SECONDS);
        watermarkingWork = new PeriodicWatermarking(streamStore, bucketStore, clientConfig, watermarkingExecutor, requestTracker);
        watermarkingService = bucketServiceFactory.createWatermarkingService(executionDurationWatermarking, watermarkingWork::watermark, watermarkingExecutor);
        watermarkingService.startAsync();
        watermarkingService.awaitRunning();
        log.info("Started background periodic service for Watermarking.");
        WatermarkingServiceHealthContributor watermarkingServiceHC = new WatermarkingServiceHealthContributor("watermarkingService", watermarkingService);
        healthServiceManager.register(watermarkingServiceHC);
        // The Controller has a mechanism to track the currently active controller host instances. On detecting a
        // failure of any controller instance, the failure detector stores the failed HostId in a failed-hosts
        // directory (FH) and invokes taskSweeper.sweepOrphanedTasks for each failed host. When all resources under
        // the failed hostId are processed and deleted, that failed HostId is removed from the FH directory.
        // Moreover, on controller process startup, it detects any hostIds not in the currently active set of
        // controllers and starts sweeping tasks orphaned by those hostIds.
        TaskSweeper taskSweeper = new TaskSweeper(taskMetadataStore, host.getHostId(), controllerExecutor, streamMetadataTasks);
        TxnSweeper txnSweeper = new TxnSweeper(streamStore, streamTransactionMetadataTasks,
                serviceConfig.getTimeoutServiceConfig().getMaxLeaseValue(), controllerExecutor);
        RequestSweeper requestSweeper = new RequestSweeper(streamStore, controllerExecutor, streamMetadataTasks);
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            cluster = new ClusterZKImpl((CuratorFramework) storeClient.getClient(), ClusterType.CONTROLLER);
        }
        kvtMetadataStore = kvtMetaStoreRef.orElseGet(() -> KVTableStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor, streamStore));
        kvtMetadataTasks = new TableMetadataTasks(kvtMetadataStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), authHelper);
        controllerService = new ControllerService(kvtMetadataStore, kvtMetadataTasks, streamStore, bucketStore,
                streamMetadataTasks, streamTransactionMetadataTasks, segmentHelper, controllerExecutor, cluster, requestTracker);
        // Set up event processors.
        setController(new LocalController(controllerService, grpcServerConfig.isAuthorizationEnabled(), grpcServerConfig.getTokenSigningKey()));
        CompletableFuture<Void> eventProcessorFuture = CompletableFuture.completedFuture(null);
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // Create the ControllerEventProcessors object.
            controllerEventProcessors = new ControllerEventProcessors(host.getHostId(), serviceConfig.getEventProcessorConfig().get(),
                    localController, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks,
                    streamTransactionMetadataTasks, kvtMetadataStore, kvtMetadataTasks, eventExecutor);
            // Bootstrap and start it asynchronously.
            eventProcessorFuture = controllerEventProcessors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtMetadataTasks)
                    .thenAcceptAsync(x -> controllerEventProcessors.startAsync(), eventExecutor);
            EventProcessorHealthContributor eventProcessorHC = new EventProcessorHealthContributor("eventProcessor", controllerEventProcessors);
            healthServiceManager.register(eventProcessorHC);
        }
        // Set up and start the controller cluster listener after all sweepers have been initialized.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            List<FailoverSweeper> failoverSweepers = new ArrayList<>();
            failoverSweepers.add(taskSweeper);
            failoverSweepers.add(txnSweeper);
            failoverSweepers.add(requestSweeper);
            if (serviceConfig.getEventProcessorConfig().isPresent()) {
                assert controllerEventProcessors != null;
                failoverSweepers.add(controllerEventProcessors);
            }
            controllerClusterListener = new ControllerClusterListener(host, cluster, controllerExecutor, failoverSweepers);
            controllerClusterListener.startAsync();
            ClusterListenerHealthContributor clusterListenerHC = new ClusterListenerHealthContributor("clusterListener", controllerClusterListener);
            healthServiceManager.register(clusterListenerHC);
        }
        // Start the Health Service.
        healthServiceManager.start();
        // Start the gRPC server.
        if (serviceConfig.getGRPCServerConfig().isPresent()) {
            grpcServer = new GRPCServer(controllerService, grpcServerConfig, requestTracker);
            grpcServer.startAsync();
            grpcServer.awaitRunning();
            GRPCServerHealthContributor grpcServerHC = new GRPCServerHealthContributor("GRPCServer", grpcServer);
            healthServiceManager.register(grpcServerHC);
        }
        // Start the REST server.
        if (serviceConfig.getRestServerConfig().isPresent()) {
            List<Object> resources = new ArrayList<>();
            resources.add(new StreamMetadataResourceImpl(this.localController, controllerService, grpcServer.getAuthHandlerManager(), connectionFactory, clientConfig));
            resources.add(new HealthImpl(grpcServer.getAuthHandlerManager(), healthServiceManager.getEndpoint()));
            resources.add(new PingImpl());
            MetricsProvider.getMetricsProvider().prometheusResource().ifPresent(resources::add);
            restServer = new RESTServer(serviceConfig.getRestServerConfig().get(), Set.copyOf(resources));
            restServer.startAsync();
            restServer.awaitRunning();
        }
        // Wait for the controller event processors to start.
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // If the store client has failed because of session expiration, there are two cases in which
            // controllerEventProcessors.awaitRunning may be stuck forever:
            // 1. stream creation is retried indefinitely and cannot complete because of ZK session expiration;
            // 2. the event writer throws an exception after stream creation.
            // In both of the above cases, controllerEventProcessors.startAsync may never get called.
            CompletableFuture.anyOf(storeClientFailureFuture,
                    eventProcessorFuture.thenAccept(x -> controllerEventProcessors.awaitRunning())).join();
        }
        // Wait for the controller cluster listener to start.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            controllerClusterListener.awaitRunning();
        }
    } catch (Exception e) {
        log.error("Failed trying to start controller services", e);
        throw e;
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}
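
The startUp method above derives all three GrpcAuthHelper constructor arguments from the gRPC server configuration. Isolated as a sketch (assuming, as in the snippet, that the gRPC server config is present):

GRPCServerConfig grpcServerConfig = serviceConfig.getGRPCServerConfig().get();
GrpcAuthHelper authHelper = new GrpcAuthHelper(
        grpcServerConfig.isAuthorizationEnabled(),      // whether authorization is enforced
        grpcServerConfig.getTokenSigningKey(),          // key used to sign delegation tokens
        grpcServerConfig.getAccessTokenTTLInSeconds()); // lifetime of issued tokens

The same instance is then shared by the stream store, the stream and transaction metadata tasks, and the KV-table store, so a single auth configuration governs all of them.
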
Use of io.pravega.controller.server.security.auth.GrpcAuthHelper in project pravega by pravega.
The class ControllerServiceWithStreamTest, method setup.
@Before
public void setup() {
    try {
        zkServer = new TestingServerStarter().start();
    } catch (Exception e) {
        log.error("Error starting ZK server", e);
    }
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    streamStore = spy(getStore());
    kvtStore = spy(getKVTStore());
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    GrpcAuthHelper disabledAuthHelper = GrpcAuthHelper.getDisabledAuthHelper();
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelperMock,
            executor, "host", disabledAuthHelper, helperMock);
    kvtMetadataTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host",
            GrpcAuthHelper.getDisabledAuthHelper(), helperMock));
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", disabledAuthHelper);
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executor),
            executor);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executor));
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            streamTransactionMetadataTasks, segmentHelperMock, executor, null, requestTracker);
}
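
Both this test setup and the KV-table variant at the bottom of the page follow the same recipe: mock the segment helper, disable auth, and wire the metadata tasks by hand. Reduced to its essentials as a sketch (reusing only names that already appear in the setups on this page):

// Mock transport and a no-op auth helper keep the test independent of a real cluster.
SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
GrpcAuthHelper disabledAuthHelper = GrpcAuthHelper.getDisabledAuthHelper();
EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host",
        ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
// The tasks object receives the disabled helper in place of a configured one.
streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore,
        segmentHelperMock, executor, "host", disabledAuthHelper, helperMock);
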
Use of io.pravega.controller.server.security.auth.GrpcAuthHelper in project pravega by pravega.
The class StreamTransactionMetadataTasksTest, method writerInitializationTest.
@Test(timeout = 10000)
public void writerInitializationTest() throws Exception {
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    StreamMetadataStore streamStoreMock = spy(StreamStoreFactory.createZKStore(zkClient, executor));
    final long leasePeriod = 5000;
    // region close before initialize
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(),
            executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    CompletableFuture<Void> future = txnTasks.writeCommitEvent(new CommitEvent("scope", "stream", 0));
    assertFalse(future.isDone());
    txnTasks.close();
    AssertExtensions.assertFutureThrows("", future, e -> Exceptions.unwrap(e) instanceof CancellationException);
    // endregion
    // region test initialize writers with client factory
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(),
            executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    future = txnTasks.writeCommitEvent(new CommitEvent("scope", "stream", 0));
    EventStreamClientFactory cfMock = mock(EventStreamClientFactory.class);
    ControllerEventProcessorConfig eventProcConfigMock = mock(ControllerEventProcessorConfig.class);
    String commitStream = "commitStream";
    doAnswer(x -> commitStream).when(eventProcConfigMock).getCommitStreamName();
    doAnswer(x -> commitWriter).when(cfMock).createEventWriter(eq(commitStream), any(), any());
    String abortStream = "abortStream";
    doAnswer(x -> abortStream).when(eventProcConfigMock).getAbortStreamName();
    doAnswer(x -> abortWriter).when(cfMock).createEventWriter(eq(abortStream), any(), any());
    // future should not have completed as we have not initialized the writers.
    assertFalse(future.isDone());
    // initialize the writers. write future should have completed now.
    txnTasks.initializeStreamWriters(cfMock, eventProcConfigMock);
    assertTrue(Futures.await(future));
    txnTasks.close();
    // endregion
    // region test method calls and initialize writers with direct writer set up method call
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(),
            executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    streamStore.createScope(SCOPE, null, executor).join();
    streamStore.createStream(SCOPE, STREAM, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), 1L, null, executor).join();
    streamStore.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
    CompletableFuture<Pair<VersionedTransactionData, List<StreamSegmentRecord>>> createFuture = txnTasks.createTxn(SCOPE, STREAM, leasePeriod, 0L, 0L);
    // create and ping transactions should not wait for writer initialization and complete immediately.
    createFuture.join();
    assertTrue(Futures.await(createFuture));
    UUID txnId = createFuture.join().getKey().getId();
    CompletableFuture<PingTxnStatus> pingFuture = txnTasks.pingTxn(SCOPE, STREAM, txnId, leasePeriod, 0L);
    assertTrue(Futures.await(pingFuture));
    CompletableFuture<TxnStatus> commitFuture = txnTasks.commitTxn(SCOPE, STREAM, txnId, 0L);
    assertFalse(commitFuture.isDone());
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    assertTrue(Futures.await(commitFuture));
    UUID txnId2 = txnTasks.createTxn(SCOPE, STREAM, leasePeriod, 0L, 1024 * 1024L).join().getKey().getId();
    assertTrue(Futures.await(txnTasks.abortTxn(SCOPE, STREAM, txnId2, null, 0L)));
}
Use of io.pravega.controller.server.security.auth.GrpcAuthHelper in project pravega by pravega.
The class ControllerServiceWithKVTableTest, method setup.
@Before
public void setup() {
    segmentHelperMock = SegmentHelperMock.getSegmentHelperMockForTables(executor);
    streamStore = spy(getStore());
    kvtStore = spy(getKVTStore());
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    GrpcAuthHelper disabledAuthHelper = GrpcAuthHelper.getDisabledAuthHelper();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelperMock,
            executor, "host", disabledAuthHelper, helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", disabledAuthHelper);
    kvtMetadataTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host",
            GrpcAuthHelper.getDisabledAuthHelper(), helperMock));
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executor),
            executor);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executor));
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            streamTransactionMetadataTasks, segmentHelperMock, executor, null, requestTracker);
}