Search in sources:

Example 1 with TableMetadataTasks

Example usage of io.pravega.controller.task.KeyValueTable.TableMetadataTasks from the project pravega by pravega.

In class ControllerServiceStarter, method startUp:

@Override
protected void startUp() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    log.info("Initiating controller service startUp");
    log.info("Controller serviceConfig = {}", serviceConfig.toString());
    log.info("Event processors enabled = {}", serviceConfig.getEventProcessorConfig().isPresent());
    log.info("Cluster listener enabled = {}", serviceConfig.isControllerClusterListenerEnabled());
    log.info("    Host monitor enabled = {}", serviceConfig.getHostMonitorConfig().isHostMonitorEnabled());
    log.info("     gRPC server enabled = {}", serviceConfig.getGRPCServerConfig().isPresent());
    log.info("     REST server enabled = {}", serviceConfig.getRestServerConfig().isPresent());
    final BucketStore bucketStore;
    final TaskMetadataStore taskMetadataStore;
    final HostControllerStore hostStore;
    final CheckpointStore checkpointStore;
    try {
        // Initialize the executor service.
        controllerExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "controllerpool");
        eventExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "eventprocessor");
        retentionExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.RETENTION_THREAD_POOL_SIZE, "retentionpool");
        watermarkingExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.WATERMARKING_THREAD_POOL_SIZE, "watermarkingpool");
        bucketStore = StreamStoreFactory.createBucketStore(storeClient, controllerExecutor);
        log.info("Created the bucket store.");
        taskMetadataStore = TaskStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Created the task store.");
        hostStore = HostStoreFactory.createStore(serviceConfig.getHostMonitorConfig(), storeClient);
        log.info("Created the host store.");
        checkpointStore = CheckpointStoreFactory.create(storeClient);
        log.info("Created the checkpoint store.");
        // Initialize Stream and Transaction metrics.
        StreamMetrics.initialize();
        TransactionMetrics.initialize();
        // On each controller process restart, we use a fresh hostId,
        // which is a combination of hostname and random GUID.
        String hostName = getHostName();
        Host host = new Host(hostName, getPort(), UUID.randomUUID().toString());
        // Create a RequestTracker instance to trace client requests end-to-end.
        GRPCServerConfig grpcServerConfig = serviceConfig.getGRPCServerConfig().get();
        RequestTracker requestTracker = new RequestTracker(grpcServerConfig.isRequestTracingEnabled());
        // Create a Health Service Manager instance.
        healthServiceManager = new HealthServiceManager(serviceConfig.getHealthCheckFrequency());
        if (serviceConfig.getHostMonitorConfig().isHostMonitorEnabled()) {
            // Start the Segment Container Monitor.
            monitor = new SegmentContainerMonitor(hostStore, (CuratorFramework) storeClient.getClient(), new UniformContainerBalancer(), serviceConfig.getHostMonitorConfig().getHostMonitorMinRebalanceInterval());
            monitor.startAsync();
            log.info("Started Segment Container Monitor service.");
            SegmentContainerMonitorHealthContributor segmentContainerMonitorHC = new SegmentContainerMonitorHealthContributor("segmentContainerMonitor", monitor);
            healthServiceManager.register(segmentContainerMonitorHC);
        }
        // This client config is used by the segment store helper (SegmentHelper) to connect to the segment store.
        ClientConfig.ClientConfigBuilder clientConfigBuilder = ClientConfig.builder().controllerURI(URI.create((grpcServerConfig.isTlsEnabled() ? "tls://" : "tcp://") + "localhost:" + grpcServerConfig.getPort())).trustStore(grpcServerConfig.getTlsTrustStore()).validateHostName(false);
        Optional<Boolean> tlsEnabledForSegmentStore = BooleanUtils.extract(serviceConfig.getTlsEnabledForSegmentStore());
        if (tlsEnabledForSegmentStore.isPresent()) {
            clientConfigBuilder.enableTlsToSegmentStore(tlsEnabledForSegmentStore.get());
        }
        // Use one connection per Segment Store to save resources.
        ClientConfig clientConfig = clientConfigBuilder.maxConnectionsPerSegmentStore(1).build();
        connectionFactory = connectionFactoryRef.orElseGet(() -> new SocketConnectionFactoryImpl(clientConfig));
        connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
        segmentHelper = segmentHelperRef.orElseGet(() -> new SegmentHelper(connectionPool, hostStore, controllerExecutor));
        GrpcAuthHelper authHelper = new GrpcAuthHelper(serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(), grpcServerConfig.getTokenSigningKey(), grpcServerConfig.getAccessTokenTTLInSeconds());
        streamStore = streamMetadataStoreRef.orElseGet(() -> StreamStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor));
        log.info("Created the stream store.");
        streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), authHelper, serviceConfig.getRetentionFrequency().toMillis());
        streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), serviceConfig.getTimeoutServiceConfig(), authHelper);
        BucketServiceFactory bucketServiceFactory = new BucketServiceFactory(host.getHostId(), bucketStore, 1000);
        Duration executionDurationRetention = serviceConfig.getRetentionFrequency();
        PeriodicRetention retentionWork = new PeriodicRetention(streamStore, streamMetadataTasks, retentionExecutor, requestTracker);
        retentionService = bucketServiceFactory.createRetentionService(executionDurationRetention, retentionWork::retention, retentionExecutor);
        retentionService.startAsync();
        retentionService.awaitRunning();
        log.info("Started background periodic service for Retention.");
        RetentionServiceHealthContributor retentionServiceHC = new RetentionServiceHealthContributor("retentionService", retentionService);
        healthServiceManager.register(retentionServiceHC);
        Duration executionDurationWatermarking = Duration.ofSeconds(Config.MINIMUM_WATERMARKING_FREQUENCY_IN_SECONDS);
        watermarkingWork = new PeriodicWatermarking(streamStore, bucketStore, clientConfig, watermarkingExecutor, requestTracker);
        watermarkingService = bucketServiceFactory.createWatermarkingService(executionDurationWatermarking, watermarkingWork::watermark, watermarkingExecutor);
        watermarkingService.startAsync();
        watermarkingService.awaitRunning();
        log.info("Started background periodic service for Watermarking.");
        WatermarkingServiceHealthContributor watermarkingServiceHC = new WatermarkingServiceHealthContributor("watermarkingService", watermarkingService);
        healthServiceManager.register(watermarkingServiceHC);
        // The Controller has a mechanism to track the currently active controller host instances. On detecting a failure of
        // any controller instance, the failure detector stores the failed HostId in a failed hosts directory (FH), and
        // invokes taskSweeper.sweepOrphanedTasks for each failed host. When all resources under the failed hostId
        // are processed and deleted, that failed HostId is removed from the FH directory.
        // Moreover, on controller process startup, it detects any hostIds not in the currently active set of
        // controllers and starts sweeping tasks orphaned by those hostIds.
        TaskSweeper taskSweeper = new TaskSweeper(taskMetadataStore, host.getHostId(), controllerExecutor, streamMetadataTasks);
        TxnSweeper txnSweeper = new TxnSweeper(streamStore, streamTransactionMetadataTasks, serviceConfig.getTimeoutServiceConfig().getMaxLeaseValue(), controllerExecutor);
        RequestSweeper requestSweeper = new RequestSweeper(streamStore, controllerExecutor, streamMetadataTasks);
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            cluster = new ClusterZKImpl((CuratorFramework) storeClient.getClient(), ClusterType.CONTROLLER);
        }
        kvtMetadataStore = kvtMetaStoreRef.orElseGet(() -> KVTableStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor, streamStore));
        kvtMetadataTasks = new TableMetadataTasks(kvtMetadataStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), authHelper);
        controllerService = new ControllerService(kvtMetadataStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelper, controllerExecutor, cluster, requestTracker);
        // Setup event processors.
        setController(new LocalController(controllerService, grpcServerConfig.isAuthorizationEnabled(), grpcServerConfig.getTokenSigningKey()));
        CompletableFuture<Void> eventProcessorFuture = CompletableFuture.completedFuture(null);
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // Create ControllerEventProcessor object.
            controllerEventProcessors = new ControllerEventProcessors(host.getHostId(), serviceConfig.getEventProcessorConfig().get(), localController, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtMetadataStore, kvtMetadataTasks, eventExecutor);
            // Bootstrap and start it asynchronously.
            eventProcessorFuture = controllerEventProcessors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtMetadataTasks).thenAcceptAsync(x -> controllerEventProcessors.startAsync(), eventExecutor);
            EventProcessorHealthContributor eventProcessorHC = new EventProcessorHealthContributor("eventProcessor", controllerEventProcessors);
            healthServiceManager.register(eventProcessorHC);
        }
        // Setup and start controller cluster listener after all sweepers have been initialized.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            List<FailoverSweeper> failoverSweepers = new ArrayList<>();
            failoverSweepers.add(taskSweeper);
            failoverSweepers.add(txnSweeper);
            failoverSweepers.add(requestSweeper);
            if (serviceConfig.getEventProcessorConfig().isPresent()) {
                assert controllerEventProcessors != null;
                failoverSweepers.add(controllerEventProcessors);
            }
            controllerClusterListener = new ControllerClusterListener(host, cluster, controllerExecutor, failoverSweepers);
            controllerClusterListener.startAsync();
            ClusterListenerHealthContributor clusterListenerHC = new ClusterListenerHealthContributor("clusterListener", controllerClusterListener);
            healthServiceManager.register(clusterListenerHC);
        }
        // Start the Health Service.
        healthServiceManager.start();
        // Start RPC server.
        if (serviceConfig.getGRPCServerConfig().isPresent()) {
            grpcServer = new GRPCServer(controllerService, grpcServerConfig, requestTracker);
            grpcServer.startAsync();
            grpcServer.awaitRunning();
            GRPCServerHealthContributor grpcServerHC = new GRPCServerHealthContributor("GRPCServer", grpcServer);
            healthServiceManager.register(grpcServerHC);
        }
        // Start REST server.
        if (serviceConfig.getRestServerConfig().isPresent()) {
            List<Object> resources = new ArrayList<>();
            resources.add(new StreamMetadataResourceImpl(this.localController, controllerService, grpcServer.getAuthHandlerManager(), connectionFactory, clientConfig));
            resources.add(new HealthImpl(grpcServer.getAuthHandlerManager(), healthServiceManager.getEndpoint()));
            resources.add(new PingImpl());
            MetricsProvider.getMetricsProvider().prometheusResource().ifPresent(resources::add);
            restServer = new RESTServer(serviceConfig.getRestServerConfig().get(), Set.copyOf(resources));
            restServer.startAsync();
            restServer.awaitRunning();
        }
        // Wait for controller event processors to start.
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // If the store client has failed because of session expiration, there are two cases in which
            // controllerEventProcessors.awaitRunning may be stuck forever:
            // 1. stream creation is retried indefinitely and cannot complete because of the ZK session expiration;
            // 2. the event writer created after stream creation throws an exception.
            // In both of the above cases controllerEventProcessors.startAsync may never get called.
            CompletableFuture.anyOf(storeClientFailureFuture, eventProcessorFuture.thenAccept(x -> controllerEventProcessors.awaitRunning())).join();
        }
        // Wait for controller cluster listeners to start.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            controllerClusterListener.awaitRunning();
        }
    } catch (Exception e) {
        log.error("Failed trying to start controller services", e);
        throw e;
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}
Also used : GRPCServer(io.pravega.controller.server.rpc.grpc.GRPCServer) ControllerEventProcessors(io.pravega.controller.server.eventProcessor.ControllerEventProcessors) CheckpointStore(io.pravega.controller.store.checkpoint.CheckpointStore) StringUtils(org.apache.commons.lang3.StringUtils) InetAddress(java.net.InetAddress) Cluster(io.pravega.common.cluster.Cluster) LocalController(io.pravega.controller.server.eventProcessor.LocalController) HealthServiceManager(io.pravega.shared.health.HealthServiceManager) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) TxnSweeper(io.pravega.controller.task.Stream.TxnSweeper) Duration(java.time.Duration) PeriodicRetention(io.pravega.controller.server.bucket.PeriodicRetention) WatermarkingServiceHealthContributor(io.pravega.controller.server.health.WatermarkingServiceHealthContributor) URI(java.net.URI) ClusterZKImpl(io.pravega.common.cluster.zkImpl.ClusterZKImpl) ControllerClusterListener(io.pravega.controller.fault.ControllerClusterListener) PeriodicWatermarking(io.pravega.controller.server.bucket.PeriodicWatermarking) RESTServer(io.pravega.shared.rest.RESTServer) Set(java.util.Set) RequestSweeper(io.pravega.controller.task.Stream.RequestSweeper) RequestTracker(io.pravega.common.tracing.RequestTracker) UUID(java.util.UUID) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) CountDownLatch(java.util.concurrent.CountDownLatch) MetricsProvider(io.pravega.shared.metrics.MetricsProvider) RetentionServiceHealthContributor(io.pravega.controller.server.health.RetentionServiceHealthContributor) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) CuratorFramework(org.apache.curator.framework.CuratorFramework) ClusterType(io.pravega.common.cluster.ClusterType) Config(io.pravega.controller.util.Config) GRPCServerHealthContributor(io.pravega.controller.server.health.GRPCServerHealthContributor) Optional(java.util.Optional) StoreType(io.pravega.controller.store.client.StoreType) BucketServiceFactory(io.pravega.controller.server.bucket.BucketServiceFactory) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) GRPCServerConfig(io.pravega.controller.server.rpc.grpc.GRPCServerConfig) KVTableStoreFactory(io.pravega.controller.store.kvtable.KVTableStoreFactory) StreamMetrics(io.pravega.controller.metrics.StreamMetrics) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) TransactionMetrics(io.pravega.controller.metrics.TransactionMetrics) Getter(lombok.Getter) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) BooleanUtils(io.pravega.common.util.BooleanUtils) CheckpointStoreFactory(io.pravega.controller.store.checkpoint.CheckpointStoreFactory) CompletableFuture(java.util.concurrent.CompletableFuture) ConnectionPoolImpl(io.pravega.client.connection.impl.ConnectionPoolImpl) PingImpl(io.pravega.controller.server.rest.resources.PingImpl) EventProcessorHealthContributor(io.pravega.controller.server.health.EventProcessorHealthContributor) StoreClient(io.pravega.controller.store.client.StoreClient) ArrayList(java.util.ArrayList) BucketStore(io.pravega.controller.store.stream.BucketStore) UniformContainerBalancer(io.pravega.controller.fault.UniformContainerBalancer) AccessLevel(lombok.AccessLevel) AbstractIdleService(com.google.common.util.concurrent.AbstractIdleService) BucketManager(io.pravega.controller.server.bucket.BucketManager) 
ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) Host(io.pravega.common.cluster.Host) LoggerHelpers(io.pravega.common.LoggerHelpers) Callbacks(io.pravega.common.function.Callbacks) ConnectionPool(io.pravega.client.connection.impl.ConnectionPool) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) UnknownHostException(java.net.UnknownHostException) FailoverSweeper(io.pravega.controller.fault.FailoverSweeper) HostStoreFactory(io.pravega.controller.store.host.HostStoreFactory) TimeUnit(java.util.concurrent.TimeUnit) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) SegmentContainerMonitorHealthContributor(io.pravega.controller.server.health.SegmentContainerMonitorHealthContributor) HealthImpl(io.pravega.shared.health.bindings.resources.HealthImpl) HostControllerStore(io.pravega.controller.store.host.HostControllerStore) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) TaskSweeper(io.pravega.controller.task.TaskSweeper) SegmentContainerMonitor(io.pravega.controller.fault.SegmentContainerMonitor) ClusterListenerHealthContributor(io.pravega.controller.server.health.ClusterListenerHealthContributor) StreamMetadataResourceImpl(io.pravega.controller.server.rest.resources.StreamMetadataResourceImpl) VisibleForTesting(com.google.common.annotations.VisibleForTesting) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) ClientConfig(io.pravega.client.ClientConfig)
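
The last block of startUp guards against controllerEventProcessors.awaitRunning hanging forever by racing it against the store-client failure future with CompletableFuture.anyOf. Below is a minimal, self-contained sketch of that racing pattern using only JDK types; the StartupRace class and its method names are illustrative and not part of Pravega.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

// Illustrative sketch: wait for a startup future, but give up as soon as a failure future fires,
// mirroring the CompletableFuture.anyOf(...) guard used at the end of startUp above.
public final class StartupRace {

    static void awaitStartupOrFailure(CompletableFuture<Void> startupFuture,
                                      CompletableFuture<Void> failureFuture) {
        // anyOf completes when either future completes, so a wedged startup cannot block forever
        // once the failure future has been completed.
        CompletableFuture.anyOf(failureFuture, startupFuture).join();
        if (failureFuture.isDone() && !startupFuture.isDone()) {
            throw new IllegalStateException("store client failed before startup completed");
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Void> startup = new CompletableFuture<>();
        CompletableFuture<Void> failure = new CompletableFuture<>();
        // Simulate startup finishing after a short delay; the failure future never completes here.
        CompletableFuture.runAsync(() -> startup.complete(null),
                CompletableFuture.delayedExecutor(100, TimeUnit.MILLISECONDS));
        awaitStartupOrFailure(startup, failure);
        System.out.println("startup completed: " + startup.isDone());
    }
}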

Example 2 with TableMetadataTasks

Example usage of io.pravega.controller.task.KeyValueTable.TableMetadataTasks from the project pravega by pravega.

In class ControllerServiceWithKVTableTest, method setup:

@Before
public void setup() {
    segmentHelperMock = SegmentHelperMock.getSegmentHelperMockForTables(executor);
    streamStore = spy(getStore());
    kvtStore = spy(getKVTStore());
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    GrpcAuthHelper disabledAuthHelper = GrpcAuthHelper.getDisabledAuthHelper();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelperMock, executor, "host", disabledAuthHelper, helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", disabledAuthHelper);
    kvtMetadataTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock));
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(new AutoScaleTask(streamMetadataTasks, streamStore, executor), new ScaleOperationTask(streamMetadataTasks, streamStore, executor), new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor), new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor), new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor), new TruncateStreamTask(streamMetadataTasks, streamStore, executor), new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor), new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor), new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor), streamStore, new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executor), executor);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executor));
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelperMock, executor, null, requestTracker);
}
Also used : DeleteScopeTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask) SealStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) UpdateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask) CreateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask) DeleteReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) AutoScaleTask(io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask) StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) DeleteStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask) UpdateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateReaderGroupTask) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventHelper(io.pravega.controller.task.EventHelper) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) BucketStore(io.pravega.controller.store.stream.BucketStore) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) TruncateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask) Before(org.junit.Before)
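
The setup relies on Mockito spies (spy(getStore()), spy(getKVTStore()), spy(new TableMetadataTasks(...))) so real objects can be partially stubbed while the rest of their behaviour stays intact. A minimal sketch of that spy-and-stub pattern is shown below; KeyValueTableCatalog and InMemoryCatalog are hypothetical types used purely for illustration, not Pravega classes.

import java.util.concurrent.CompletableFuture;

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

// Hypothetical catalog interface, only for illustrating the spy pattern.
interface KeyValueTableCatalog {
    CompletableFuture<Boolean> tableExists(String scope, String table);
}

class InMemoryCatalog implements KeyValueTableCatalog {
    @Override
    public CompletableFuture<Boolean> tableExists(String scope, String table) {
        return CompletableFuture.completedFuture(false);
    }
}

public final class SpyWiringSketch {
    public static void main(String[] args) {
        // Spy the real object so unstubbed calls still hit the real implementation,
        // then override only the behaviour this particular test needs.
        KeyValueTableCatalog catalog = spy(new InMemoryCatalog());
        doReturn(CompletableFuture.completedFuture(true))
                .when(catalog).tableExists(anyString(), anyString());
        // Prints "true" because the stubbed answer wins over the real implementation.
        System.out.println(catalog.tableExists("scope", "kvt").join());
    }
}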

Example 3 with TableMetadataTasks

Example usage of io.pravega.controller.task.KeyValueTable.TableMetadataTasks from the project pravega by pravega.

In class ControllerServiceWithStreamTest, method setup:

@Before
public void setup() {
    try {
        zkServer = new TestingServerStarter().start();
    } catch (Exception e) {
        log.error("Error starting ZK server", e);
    }
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    streamStore = spy(getStore());
    kvtStore = spy(getKVTStore());
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    GrpcAuthHelper disabledAuthHelper = GrpcAuthHelper.getDisabledAuthHelper();
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelperMock, executor, "host", disabledAuthHelper, helperMock);
    kvtMetadataTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock));
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", disabledAuthHelper);
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(new AutoScaleTask(streamMetadataTasks, streamStore, executor), new ScaleOperationTask(streamMetadataTasks, streamStore, executor), new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor), new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor), new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor), new TruncateStreamTask(streamMetadataTasks, streamStore, executor), new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor), new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor), new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor), streamStore, new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executor), executor);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executor));
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelperMock, executor, null, requestTracker);
}
Also used : DeleteScopeTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) CreateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask) AutoScaleTask(io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) BucketStore(io.pravega.controller.store.stream.BucketStore) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) SealStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask) TestingServerStarter(io.pravega.test.common.TestingServerStarter) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) UpdateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask) DeleteReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) ExecutionException(java.util.concurrent.ExecutionException) StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) DeleteStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask) UpdateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateReaderGroupTask) EventHelper(io.pravega.controller.task.EventHelper) TruncateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask) Before(org.junit.Before)
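
The only structural difference from the previous setup is that this test stands up its own embedded ZooKeeper and connects a Curator client with an ExponentialBackoffRetry policy. A small sketch of that Curator bootstrap is shown below, assuming a ZooKeeper server is already reachable; the connect string and class name are placeholders.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public final class CuratorBootstrapSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connect string; the test above obtains it from an embedded test server.
        String connectString = "localhost:2181";
        // 200 ms base sleep, at most 10 retries, each sleep capped at 5000 ms,
        // mirroring the arguments used in the setup method above.
        ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(200, 10, 5000);
        CuratorFramework zkClient = CuratorFrameworkFactory.newClient(connectString, retryPolicy);
        try {
            zkClient.start();
            // Block until the connection is established (or interrupted).
            zkClient.blockUntilConnected();
            System.out.println("Connected to ZooKeeper at " + connectString);
        } finally {
            zkClient.close();
        }
    }
}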

Example 4 with TableMetadataTasks

Example usage of io.pravega.controller.task.KeyValueTable.TableMetadataTasks from the project pravega by pravega.

In class ControllerEventProcessorsTest, method testTruncate:

@Test(timeout = 10000L)
public void testTruncate() throws CheckpointStoreException, InterruptedException {
    LocalController controller = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    Map<SegmentWithRange, Long> map1 = new HashMap<>();
    map1.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 10L);
    map1.put(new SegmentWithRange(new Segment("scope", "stream", 1L), 0.33, 0.66), 10L);
    map1.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 20L);
    Map<SegmentWithRange, Long> map2 = new HashMap<>();
    map2.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 20L);
    map2.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 10L);
    Map<SegmentWithRange, Long> map3 = new HashMap<>();
    map3.put(new SegmentWithRange(new Segment("scope", "stream", 3L), 0.0, 0.33), 0L);
    map3.put(new SegmentWithRange(new Segment("scope", "stream", 4L), 0.33, 0.66), 10L);
    map3.put(new SegmentWithRange(new Segment("scope", "stream", 5L), 0.66, 1.0), 20L);
    PositionImpl position1 = new PositionImpl(map1);
    PositionImpl position2 = new PositionImpl(map2);
    PositionImpl position3 = new PositionImpl(map3);
    doReturn(getProcessor()).when(system).createEventProcessorGroup(any(), any(), any());
    doReturn(CompletableFuture.completedFuture(null)).when(controller).createScope(anyString());
    doReturn(CompletableFuture.completedFuture(null)).when(controller).createInternalStream(anyString(), anyString(), any());
    doNothing().when(streamMetadataTasks).initializeStreamWriters(any(), anyString());
    doNothing().when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
    AtomicBoolean requestCalled = new AtomicBoolean(false);
    AtomicBoolean commitCalled = new AtomicBoolean(false);
    CompletableFuture<Void> requestStreamTruncationFuture = new CompletableFuture<>();
    CompletableFuture<Void> kvtStreamTruncationFuture = new CompletableFuture<>();
    CompletableFuture<Void> abortStreamTruncationFuture = new CompletableFuture<>();
    CompletableFuture<Void> commitStreamTruncationFuture = new CompletableFuture<>();
    doAnswer(x -> {
        String argument = x.getArgument(1);
        if (argument.equals(config.getRequestStreamName())) {
            // let one of the processors throw an exception; this should still be retried in the next cycle.
            if (!requestCalled.get()) {
                requestCalled.set(true);
                throw new RuntimeException("inducing sporadic failure");
            } else {
                requestStreamTruncationFuture.complete(null);
            }
        } else if (argument.equals(config.getCommitStreamName())) {
            // let one of the processors throw an exception; this should still be retried in the next cycle.
            if (commitCalled.get()) {
                commitStreamTruncationFuture.complete(null);
            } else {
                commitCalled.set(true);
                return CompletableFuture.completedFuture(false);
            }
        } else if (argument.equals(config.getAbortStreamName())) {
            abortStreamTruncationFuture.complete(null);
        } else if (argument.equals(config.getKvtStreamName())) {
            kvtStreamTruncationFuture.complete(null);
        }
        return CompletableFuture.completedFuture(true);
    }).when(streamMetadataTasks).startTruncation(anyString(), anyString(), any(), any());
    Set<String> processes = Sets.newHashSet("p1", "p2", "p3");
    // first throw checkpoint store exception
    AtomicBoolean signal = new AtomicBoolean(false);
    CountDownLatch cd = new CountDownLatch(4);
    doAnswer(x -> {
        // this ensures that the call to truncate has been invoked for all 4 internal streams.
        cd.countDown();
        cd.await();
        if (!signal.get()) {
            throw new CheckpointStoreException("CheckpointStoreException");
        } else {
            return processes;
        }
    }).when(checkpointStore).getProcesses();
    Map<String, PositionImpl> r1 = Collections.singletonMap("r1", position1);
    doReturn(r1).when(checkpointStore).getPositions(eq("p1"), anyString());
    Map<String, PositionImpl> r2 = Collections.singletonMap("r2", position1);
    doReturn(r2).when(checkpointStore).getPositions(eq("p2"), anyString());
    Map<String, PositionImpl> r3 = Collections.singletonMap("r3", position1);
    doReturn(r3).when(checkpointStore).getPositions(eq("p3"), anyString());
    @Cleanup ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
    // set truncation interval
    processors.setTruncationInterval(100L);
    processors.startAsync();
    processors.awaitRunning();
    ControllerEventProcessors processorsSpied = spy(processors);
    processorsSpied.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
    // wait until checkpointStore.getProcesses has been invoked (throwing CheckpointStoreException) for all 4 internal streams.
    cd.await();
    verify(processorsSpied, atLeast(4)).truncate(any(), any(), any());
    verify(checkpointStore, atLeast(4)).getProcesses();
    verify(checkpointStore, never()).getPositions(anyString(), anyString());
    verify(streamMetadataTasks, never()).startTruncation(anyString(), anyString(), any(), any());
    signal.set(true);
    CompletableFuture.allOf(requestStreamTruncationFuture, commitStreamTruncationFuture, abortStreamTruncationFuture, kvtStreamTruncationFuture).join();
    // verify that truncate method is being called periodically.
    verify(processorsSpied, atLeastOnce()).truncate(config.getRequestStreamName(), config.getRequestReaderGroupName(), streamMetadataTasks);
    verify(processorsSpied, atLeastOnce()).truncate(config.getCommitStreamName(), config.getCommitReaderGroupName(), streamMetadataTasks);
    verify(processorsSpied, atLeastOnce()).truncate(config.getAbortStreamName(), config.getAbortReaderGroupName(), streamMetadataTasks);
    verify(processorsSpied, atLeastOnce()).truncate(config.getKvtStreamName(), config.getKvtReaderGroupName(), streamMetadataTasks);
    for (int i = 1; i <= 3; i++) {
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getRequestReaderGroupName());
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getCommitReaderGroupName());
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getAbortReaderGroupName());
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getKvtReaderGroupName());
    }
}
Also used : ConnectionPool(io.pravega.client.connection.impl.ConnectionPool) EventProcessorSystem(io.pravega.controller.eventProcessor.EventProcessorSystem) HashMap(java.util.HashMap) PositionImpl(io.pravega.client.stream.impl.PositionImpl) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) CheckpointStore(io.pravega.controller.store.checkpoint.CheckpointStore) ZKCheckpointStore(io.pravega.controller.store.checkpoint.ZKCheckpointStore) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) Cleanup(lombok.Cleanup) Segment(io.pravega.client.segment.impl.Segment) CompletableFuture(java.util.concurrent.CompletableFuture) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) SegmentWithRange(io.pravega.client.stream.impl.SegmentWithRange) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) BucketStore(io.pravega.controller.store.stream.BucketStore) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CheckpointStoreException(io.pravega.controller.store.checkpoint.CheckpointStoreException) Test(org.junit.Test)
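
The doAnswer stubs above use AtomicBoolean flags so that startTruncation fails exactly once per stream and succeeds afterwards, letting the test assert that truncation is retried on the next cycle. A minimal sketch of that fail-once stubbing pattern follows; the Truncator interface is hypothetical and exists only for this illustration.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

// Hypothetical interface, only for illustrating the fail-once stub.
interface Truncator {
    CompletableFuture<Boolean> startTruncation(String scope, String stream);
}

public final class FailOnceStubSketch {
    public static void main(String[] args) {
        Truncator truncator = mock(Truncator.class);
        AtomicBoolean failedOnce = new AtomicBoolean(false);
        // The first call throws to simulate a sporadic failure; later calls succeed,
        // so a caller that retries on the next cycle will eventually observe success.
        doAnswer(invocation -> {
            if (failedOnce.compareAndSet(false, true)) {
                throw new RuntimeException("inducing sporadic failure");
            }
            return CompletableFuture.completedFuture(true);
        }).when(truncator).startTruncation(anyString(), anyString());

        try {
            truncator.startTruncation("scope", "stream");
        } catch (RuntimeException e) {
            System.out.println("first attempt failed: " + e.getMessage());
        }
        System.out.println("second attempt: " + truncator.startTruncation("scope", "stream").join());
    }
}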

Example 5 with TableMetadataTasks

Example usage of io.pravega.controller.task.KeyValueTable.TableMetadataTasks from the project pravega by pravega.

In class ControllerEventProcessorsTest, method testBootstrap:

@Test(timeout = 30000L)
public void testBootstrap() throws Exception {
    LocalController controller = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    doAnswer(x -> null).when(streamMetadataTasks).initializeStreamWriters(any(), any());
    doAnswer(x -> null).when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
    LinkedBlockingQueue<CompletableFuture<Boolean>> createScopeResponses = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<CompletableFuture<Void>> createScopeSignals = new LinkedBlockingQueue<>();
    List<CompletableFuture<Boolean>> createScopeResponsesList = new LinkedList<>();
    List<CompletableFuture<Void>> createScopeSignalsList = new LinkedList<>();
    for (int i = 0; i < 2; i++) {
        CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
        CompletableFuture<Void> signalFuture = new CompletableFuture<>();
        createScopeResponsesList.add(responseFuture);
        createScopeResponses.add(responseFuture);
        createScopeSignalsList.add(signalFuture);
        createScopeSignals.add(signalFuture);
    }
    // complete the next signal future and return the next canned response future from the queue
    doAnswer(x -> {
        createScopeSignals.take().complete(null);
        return createScopeResponses.take();
    }).when(controller).createScope(anyString());
    LinkedBlockingQueue<CompletableFuture<Boolean>> createStreamResponses = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<CompletableFuture<Void>> createStreamSignals = new LinkedBlockingQueue<>();
    List<CompletableFuture<Boolean>> createStreamResponsesList = new LinkedList<>();
    List<CompletableFuture<Void>> createStreamSignalsList = new LinkedList<>();
    for (int i = 0; i < 8; i++) {
        CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
        CompletableFuture<Void> signalFuture = new CompletableFuture<>();
        createStreamResponsesList.add(responseFuture);
        createStreamResponses.add(responseFuture);
        createStreamSignalsList.add(signalFuture);
        createStreamSignals.add(signalFuture);
    }
    // complete the next signal future and return the next canned response future from the queue
    doAnswer(x -> {
        createStreamSignals.take().complete(null);
        return createStreamResponses.take();
    }).when(controller).createInternalStream(anyString(), anyString(), any());
    @Cleanup ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
    // call bootstrap on ControllerEventProcessors
    processors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
    // wait on create scope being called.
    createScopeSignalsList.get(0).join();
    verify(controller, times(1)).createScope(any());
    // complete scopeFuture1 exceptionally. this should result in a retry.
    createScopeResponsesList.get(0).completeExceptionally(new RuntimeException());
    // wait on second scope signal being called
    createScopeSignalsList.get(1).join();
    verify(controller, times(2)).createScope(any());
    // so far no create stream should have been invoked
    verify(controller, times(0)).createInternalStream(anyString(), anyString(), any());
    // complete scopeFuture2 successfully
    createScopeResponsesList.get(1).complete(true);
    // create streams should be called now
    // since bootstrap creates four internal streams, we wait on the first four signal futures
    createStreamSignalsList.get(0).join();
    createStreamSignalsList.get(1).join();
    createStreamSignalsList.get(2).join();
    createStreamSignalsList.get(3).join();
    verify(controller, times(4)).createInternalStream(anyString(), anyString(), any());
    // fail first four requests
    createStreamResponsesList.get(0).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(1).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(2).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(3).completeExceptionally(new RuntimeException());
    // this should result in a retry for four create streams. wait on next four signals
    createStreamSignalsList.get(4).join();
    createStreamSignalsList.get(5).join();
    createStreamSignalsList.get(6).join();
    createStreamSignalsList.get(7).join();
    verify(controller, times(8)).createInternalStream(anyString(), anyString(), any());
    // complete successfully
    createStreamResponsesList.get(4).complete(true);
    createStreamResponsesList.get(5).complete(true);
    createStreamResponsesList.get(6).complete(true);
    createStreamResponsesList.get(7).complete(true);
    AssertExtensions.assertEventuallyEquals(true, () -> processors.getBootstrapCompleted().get(), 10000);
}
Also used : ConnectionPool(io.pravega.client.connection.impl.ConnectionPool) EventProcessorSystem(io.pravega.controller.eventProcessor.EventProcessorSystem) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) CheckpointStore(io.pravega.controller.store.checkpoint.CheckpointStore) ZKCheckpointStore(io.pravega.controller.store.checkpoint.ZKCheckpointStore) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Cleanup(lombok.Cleanup) CompletableFuture(java.util.concurrent.CompletableFuture) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) BucketStore(io.pravega.controller.store.stream.BucketStore) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) LinkedList(java.util.LinkedList) Test(org.junit.Test)
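
The test drives the asynchronous bootstrap deterministically by pairing a queue of signal futures (completed when the mocked call actually happens) with a queue of canned response futures (completed by the test to choose success or failure and thereby force retries). A minimal sketch of that signal/response queue pattern, with illustrative names only, is shown below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;

public final class SignalResponseQueueSketch {

    // The code under test would call this: each call completes the next signal future
    // (telling the test the call happened) and returns the next response future,
    // whose outcome the test controls.
    static CompletableFuture<Boolean> createScope(LinkedBlockingQueue<CompletableFuture<Void>> signals,
                                                  LinkedBlockingQueue<CompletableFuture<Boolean>> responses)
            throws InterruptedException {
        signals.take().complete(null);
        return responses.take();
    }

    public static void main(String[] args) throws Exception {
        LinkedBlockingQueue<CompletableFuture<Void>> signals = new LinkedBlockingQueue<>();
        LinkedBlockingQueue<CompletableFuture<Boolean>> responses = new LinkedBlockingQueue<>();
        CompletableFuture<Void> signal = new CompletableFuture<>();
        CompletableFuture<Boolean> response = new CompletableFuture<>();
        signals.add(signal);
        responses.add(response);

        CompletableFuture<Boolean> result = createScope(signals, responses);
        // The test waits until the call has actually been made...
        signal.join();
        // ...then decides the outcome; completing exceptionally instead would force a retry in the real test.
        response.complete(true);
        System.out.println("createScope result: " + result.join());
    }
}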

Aggregations

BucketStore (io.pravega.controller.store.stream.BucketStore): 15
TableMetadataTasks (io.pravega.controller.task.KeyValueTable.TableMetadataTasks): 15
StreamMetadataTasks (io.pravega.controller.task.Stream.StreamMetadataTasks): 14
StreamTransactionMetadataTasks (io.pravega.controller.task.Stream.StreamTransactionMetadataTasks): 14
StreamMetadataStore (io.pravega.controller.store.stream.StreamMetadataStore): 10
KVTableMetadataStore (io.pravega.controller.store.kvtable.KVTableMetadataStore): 9
AutoScaleTask (io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask): 8
CreateReaderGroupTask (io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask): 8
DeleteReaderGroupTask (io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask): 8
DeleteScopeTask (io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask): 8
DeleteStreamTask (io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask): 8
ScaleOperationTask (io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask): 8
SealStreamTask (io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask): 8
StreamRequestHandler (io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler): 8
TruncateStreamTask (io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask): 8
UpdateReaderGroupTask (io.pravega.controller.server.eventProcessor.requesthandlers.UpdateReaderGroupTask): 8
UpdateStreamTask (io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask): 8
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock): 7
EventHelper (io.pravega.controller.task.EventHelper): 7
Before (org.junit.Before): 7