use of io.pravega.controller.task.KeyValueTable.TableMetadataTasks in project pravega by pravega.
the class ControllerServiceStarter method startUp.
@Override
protected void startUp() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    log.info("Initiating controller service startUp");
    log.info("Controller serviceConfig = {}", serviceConfig.toString());
    log.info("Event processors enabled = {}", serviceConfig.getEventProcessorConfig().isPresent());
    log.info("Cluster listener enabled = {}", serviceConfig.isControllerClusterListenerEnabled());
    log.info(" Host monitor enabled = {}", serviceConfig.getHostMonitorConfig().isHostMonitorEnabled());
    log.info(" gRPC server enabled = {}", serviceConfig.getGRPCServerConfig().isPresent());
    log.info(" REST server enabled = {}", serviceConfig.getRestServerConfig().isPresent());
    final BucketStore bucketStore;
    final TaskMetadataStore taskMetadataStore;
    final HostControllerStore hostStore;
    final CheckpointStore checkpointStore;
    try {
        // Initialize the executor service.
        controllerExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "controllerpool");
        eventExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "eventprocessor");
        retentionExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.RETENTION_THREAD_POOL_SIZE, "retentionpool");
        watermarkingExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.WATERMARKING_THREAD_POOL_SIZE, "watermarkingpool");
        bucketStore = StreamStoreFactory.createBucketStore(storeClient, controllerExecutor);
        log.info("Created the bucket store.");
        taskMetadataStore = TaskStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Created the task store.");
        hostStore = HostStoreFactory.createStore(serviceConfig.getHostMonitorConfig(), storeClient);
        log.info("Created the host store.");
        checkpointStore = CheckpointStoreFactory.create(storeClient);
        log.info("Created the checkpoint store.");
        // Initialize Stream and Transaction metrics.
        StreamMetrics.initialize();
        TransactionMetrics.initialize();
        // On each controller process restart, we use a fresh hostId,
        // which is a combination of hostname and random GUID.
        String hostName = getHostName();
        Host host = new Host(hostName, getPort(), UUID.randomUUID().toString());
        // Create a RequestTracker instance to trace client requests end-to-end.
        GRPCServerConfig grpcServerConfig = serviceConfig.getGRPCServerConfig().get();
        RequestTracker requestTracker = new RequestTracker(grpcServerConfig.isRequestTracingEnabled());
        // Create a Health Service Manager instance.
        healthServiceManager = new HealthServiceManager(serviceConfig.getHealthCheckFrequency());
        if (serviceConfig.getHostMonitorConfig().isHostMonitorEnabled()) {
            // Start the Segment Container Monitor.
            monitor = new SegmentContainerMonitor(hostStore, (CuratorFramework) storeClient.getClient(), new UniformContainerBalancer(), serviceConfig.getHostMonitorConfig().getHostMonitorMinRebalanceInterval());
            monitor.startAsync();
            log.info("Started Segment Container Monitor service.");
            SegmentContainerMonitorHealthContributor segmentContainerMonitorHC = new SegmentContainerMonitorHealthContributor("segmentContainerMonitor", monitor);
            healthServiceManager.register(segmentContainerMonitorHC);
        }
        // This client config is used by the segment store helper (SegmentHelper) to connect to the segment store.
        ClientConfig.ClientConfigBuilder clientConfigBuilder = ClientConfig.builder().controllerURI(URI.create((grpcServerConfig.isTlsEnabled() ? "tls://" : "tcp://") + "localhost:" + grpcServerConfig.getPort())).trustStore(grpcServerConfig.getTlsTrustStore()).validateHostName(false);
        Optional<Boolean> tlsEnabledForSegmentStore = BooleanUtils.extract(serviceConfig.getTlsEnabledForSegmentStore());
        if (tlsEnabledForSegmentStore.isPresent()) {
            clientConfigBuilder.enableTlsToSegmentStore(tlsEnabledForSegmentStore.get());
        }
        // Use one connection per Segment Store to conserve resources.
        ClientConfig clientConfig = clientConfigBuilder.maxConnectionsPerSegmentStore(1).build();
        connectionFactory = connectionFactoryRef.orElseGet(() -> new SocketConnectionFactoryImpl(clientConfig));
        connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
        segmentHelper = segmentHelperRef.orElseGet(() -> new SegmentHelper(connectionPool, hostStore, controllerExecutor));
        GrpcAuthHelper authHelper = new GrpcAuthHelper(serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(), grpcServerConfig.getTokenSigningKey(), grpcServerConfig.getAccessTokenTTLInSeconds());
        streamStore = streamMetadataStoreRef.orElseGet(() -> StreamStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor));
        log.info("Created the stream store.");
        streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), authHelper, serviceConfig.getRetentionFrequency().toMillis());
        streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), serviceConfig.getTimeoutServiceConfig(), authHelper);
        BucketServiceFactory bucketServiceFactory = new BucketServiceFactory(host.getHostId(), bucketStore, 1000);
        Duration executionDurationRetention = serviceConfig.getRetentionFrequency();
        PeriodicRetention retentionWork = new PeriodicRetention(streamStore, streamMetadataTasks, retentionExecutor, requestTracker);
        retentionService = bucketServiceFactory.createRetentionService(executionDurationRetention, retentionWork::retention, retentionExecutor);
        retentionService.startAsync();
        retentionService.awaitRunning();
        log.info("Started background periodic service for Retention.");
        RetentionServiceHealthContributor retentionServiceHC = new RetentionServiceHealthContributor("retentionService", retentionService);
        healthServiceManager.register(retentionServiceHC);
        Duration executionDurationWatermarking = Duration.ofSeconds(Config.MINIMUM_WATERMARKING_FREQUENCY_IN_SECONDS);
        watermarkingWork = new PeriodicWatermarking(streamStore, bucketStore, clientConfig, watermarkingExecutor, requestTracker);
        watermarkingService = bucketServiceFactory.createWatermarkingService(executionDurationWatermarking, watermarkingWork::watermark, watermarkingExecutor);
        watermarkingService.startAsync();
        watermarkingService.awaitRunning();
        log.info("Started background periodic service for Watermarking.");
        WatermarkingServiceHealthContributor watermarkingServiceHC = new WatermarkingServiceHealthContributor("watermarkingService", watermarkingService);
        healthServiceManager.register(watermarkingServiceHC);
        // The controller tracks the currently active controller host instances. On detecting a failure of
        // any controller instance, the failure detector stores the failed HostId in a failed-hosts directory
        // (FH) and invokes taskSweeper.sweepOrphanedTasks for each failed host. When all resources under the
        // failed HostId are processed and deleted, that HostId is removed from the FH directory.
        // In addition, on startup a controller process detects any HostIds not in the currently active set of
        // controllers and starts sweeping tasks orphaned by those HostIds.
        TaskSweeper taskSweeper = new TaskSweeper(taskMetadataStore, host.getHostId(), controllerExecutor, streamMetadataTasks);
        TxnSweeper txnSweeper = new TxnSweeper(streamStore, streamTransactionMetadataTasks, serviceConfig.getTimeoutServiceConfig().getMaxLeaseValue(), controllerExecutor);
        RequestSweeper requestSweeper = new RequestSweeper(streamStore, controllerExecutor, streamMetadataTasks);
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            cluster = new ClusterZKImpl((CuratorFramework) storeClient.getClient(), ClusterType.CONTROLLER);
        }
        kvtMetadataStore = kvtMetaStoreRef.orElseGet(() -> KVTableStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor, streamStore));
        kvtMetadataTasks = new TableMetadataTasks(kvtMetadataStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), authHelper);
        controllerService = new ControllerService(kvtMetadataStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelper, controllerExecutor, cluster, requestTracker);
        // Setup event processors.
        setController(new LocalController(controllerService, grpcServerConfig.isAuthorizationEnabled(), grpcServerConfig.getTokenSigningKey()));
        CompletableFuture<Void> eventProcessorFuture = CompletableFuture.completedFuture(null);
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // Create ControllerEventProcessor object.
            controllerEventProcessors = new ControllerEventProcessors(host.getHostId(), serviceConfig.getEventProcessorConfig().get(), localController, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtMetadataStore, kvtMetadataTasks, eventExecutor);
            // Bootstrap and start it asynchronously.
            eventProcessorFuture = controllerEventProcessors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtMetadataTasks).thenAcceptAsync(x -> controllerEventProcessors.startAsync(), eventExecutor);
            EventProcessorHealthContributor eventProcessorHC = new EventProcessorHealthContributor("eventProcessor", controllerEventProcessors);
            healthServiceManager.register(eventProcessorHC);
        }
        // Setup and start controller cluster listener after all sweepers have been initialized.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            List<FailoverSweeper> failoverSweepers = new ArrayList<>();
            failoverSweepers.add(taskSweeper);
            failoverSweepers.add(txnSweeper);
            failoverSweepers.add(requestSweeper);
            if (serviceConfig.getEventProcessorConfig().isPresent()) {
                assert controllerEventProcessors != null;
                failoverSweepers.add(controllerEventProcessors);
            }
            controllerClusterListener = new ControllerClusterListener(host, cluster, controllerExecutor, failoverSweepers);
            controllerClusterListener.startAsync();
            ClusterListenerHealthContributor clusterListenerHC = new ClusterListenerHealthContributor("clusterListener", controllerClusterListener);
            healthServiceManager.register(clusterListenerHC);
        }
        // Start the Health Service.
        healthServiceManager.start();
        // Start RPC server.
        if (serviceConfig.getGRPCServerConfig().isPresent()) {
            grpcServer = new GRPCServer(controllerService, grpcServerConfig, requestTracker);
            grpcServer.startAsync();
            grpcServer.awaitRunning();
            GRPCServerHealthContributor grpcServerHC = new GRPCServerHealthContributor("GRPCServer", grpcServer);
            healthServiceManager.register(grpcServerHC);
        }
        // Start REST server.
        if (serviceConfig.getRestServerConfig().isPresent()) {
            List<Object> resources = new ArrayList<>();
            resources.add(new StreamMetadataResourceImpl(this.localController, controllerService, grpcServer.getAuthHandlerManager(), connectionFactory, clientConfig));
            resources.add(new HealthImpl(grpcServer.getAuthHandlerManager(), healthServiceManager.getEndpoint()));
            resources.add(new PingImpl());
            MetricsProvider.getMetricsProvider().prometheusResource().ifPresent(resources::add);
            restServer = new RESTServer(serviceConfig.getRestServerConfig().get(), Set.copyOf(resources));
            restServer.startAsync();
            restServer.awaitRunning();
        }
        // Wait for controller event processors to start.
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // If the store client has failed because of session expiration, there are two cases in which
            // controllerEventProcessors.awaitRunning may block forever:
            // 1. stream creation is retried indefinitely and cannot complete because of ZK session expiration;
            // 2. the event writer throws an exception after stream creation.
            // In both cases, controllerEventProcessors.startAsync may never be called.
            CompletableFuture.anyOf(storeClientFailureFuture, eventProcessorFuture.thenAccept(x -> controllerEventProcessors.awaitRunning())).join();
        }
        // Wait for controller cluster listeners to start.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            controllerClusterListener.awaitRunning();
        }
    } catch (Exception e) {
        log.error("Failed trying to start controller services", e);
        throw e;
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}
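For readers tracking only the TableMetadataTasks usage, the fragment below isolates the KeyValueTable wiring from the startUp method above. It is an annotated extraction, not a standalone program: storeClient, segmentHelper, the executors, host, authHelper, and streamStore are assumed to be initialized exactly as shown above.

// Annotated extraction of the KeyValueTable wiring from startUp above.
// All referenced fields are assumed to be initialized as in the full method.
kvtMetadataStore = kvtMetaStoreRef.orElseGet(() ->
        KVTableStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor, streamStore));

// TableMetadataTasks drives KeyValueTable metadata operations; it receives both the
// general-purpose and event-processor executors plus this host's id for task ownership.
kvtMetadataTasks = new TableMetadataTasks(kvtMetadataStore, segmentHelper, controllerExecutor,
        eventExecutor, host.getHostId(), authHelper);

// Both are then handed to ControllerService (and, when event processors are enabled,
// to ControllerEventProcessors), which expose the KeyValueTable APIs to clients.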
use of io.pravega.controller.task.KeyValueTable.TableMetadataTasks in project pravega by pravega.
the class ControllerServiceWithKVTableTest method setup.
@Before
public void setup() {
    segmentHelperMock = SegmentHelperMock.getSegmentHelperMockForTables(executor);
    streamStore = spy(getStore());
    kvtStore = spy(getKVTStore());
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    GrpcAuthHelper disabledAuthHelper = GrpcAuthHelper.getDisabledAuthHelper();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelperMock, executor, "host", disabledAuthHelper, helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", disabledAuthHelper);
    kvtMetadataTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock));
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executor),
            executor);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executor));
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelperMock, executor, null, requestTracker);
}
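The matching teardown is not part of this snippet. Below is a minimal sketch of what it typically looks like, assuming the fields created in setup expose the usual close()/reset() methods; the method name tearDown and the ExecutorServiceHelpers call are assumptions following the common Pravega test pattern, not taken from this page.

@After
public void tearDown() throws Exception {
    // Assumed counterpart to setup; names follow the common Pravega test pattern.
    streamMetadataTasks.close();
    streamTransactionMetadataTasks.close();
    kvtMetadataTasks.close();
    connectionFactory.close();
    StreamMetrics.reset();
    TransactionMetrics.reset();
    ExecutorServiceHelpers.shutdown(executor);
}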
use of io.pravega.controller.task.KeyValueTable.TableMetadataTasks in project pravega by pravega.
the class ControllerServiceWithStreamTest method setup.
@Before
public void setup() {
    try {
        zkServer = new TestingServerStarter().start();
    } catch (Exception e) {
        log.error("Error starting ZK server", e);
    }
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    streamStore = spy(getStore());
    kvtStore = spy(getKVTStore());
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    GrpcAuthHelper disabledAuthHelper = GrpcAuthHelper.getDisabledAuthHelper();
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelperMock, executor, "host", disabledAuthHelper, helperMock);
    kvtMetadataTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock));
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", disabledAuthHelper);
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executor),
            executor);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executor));
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelperMock, executor, null, requestTracker);
}
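With the service wired up, tests drive it through consumer. The fragment below is purely illustrative: the createKeyValueTable signature and the configuration fields are assumptions inferred from the TableMetadataTasks wiring above, not signatures confirmed by this page.

// Hypothetical usage sketch; the method name, parameters, and return type are assumed.
KeyValueTableConfiguration kvtConfig = KeyValueTableConfiguration.builder()
        .partitionCount(2) // newer versions may also require key-length settings
        .build();
consumer.createKeyValueTable("scope", "kvt1", kvtConfig, System.currentTimeMillis(), 0L)
        .join(); // block until the controller reports the creation status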
use of io.pravega.controller.task.KeyValueTable.TableMetadataTasks in project pravega by pravega.
the class ControllerEventProcessorsTest method testTruncate.
@Test(timeout = 10000L)
public void testTruncate() throws CheckpointStoreException, InterruptedException {
    LocalController controller = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    Map<SegmentWithRange, Long> map1 = new HashMap<>();
    map1.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 10L);
    map1.put(new SegmentWithRange(new Segment("scope", "stream", 1L), 0.33, 0.66), 10L);
    map1.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 20L);
    Map<SegmentWithRange, Long> map2 = new HashMap<>();
    map2.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 20L);
    map2.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 10L);
    Map<SegmentWithRange, Long> map3 = new HashMap<>();
    map3.put(new SegmentWithRange(new Segment("scope", "stream", 3L), 0.0, 0.33), 0L);
    map3.put(new SegmentWithRange(new Segment("scope", "stream", 4L), 0.33, 0.66), 10L);
    map3.put(new SegmentWithRange(new Segment("scope", "stream", 5L), 0.66, 1.0), 20L);
    PositionImpl position1 = new PositionImpl(map1);
    PositionImpl position2 = new PositionImpl(map2);
    PositionImpl position3 = new PositionImpl(map3);
    doReturn(getProcessor()).when(system).createEventProcessorGroup(any(), any(), any());
    doReturn(CompletableFuture.completedFuture(null)).when(controller).createScope(anyString());
    doReturn(CompletableFuture.completedFuture(null)).when(controller).createInternalStream(anyString(), anyString(), any());
    doNothing().when(streamMetadataTasks).initializeStreamWriters(any(), anyString());
    doNothing().when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
    AtomicBoolean requestCalled = new AtomicBoolean(false);
    AtomicBoolean commitCalled = new AtomicBoolean(false);
    CompletableFuture<Void> requestStreamTruncationFuture = new CompletableFuture<>();
    CompletableFuture<Void> kvtStreamTruncationFuture = new CompletableFuture<>();
    CompletableFuture<Void> abortStreamTruncationFuture = new CompletableFuture<>();
    CompletableFuture<Void> commitStreamTruncationFuture = new CompletableFuture<>();
    doAnswer(x -> {
        String argument = x.getArgument(1);
        if (argument.equals(config.getRequestStreamName())) {
            // Let this processor throw an exception once; truncation should be retried in the next cycle.
            if (!requestCalled.get()) {
                requestCalled.set(true);
                throw new RuntimeException("inducing sporadic failure");
            } else {
                requestStreamTruncationFuture.complete(null);
            }
        } else if (argument.equals(config.getCommitStreamName())) {
            // Let this processor report failure once; truncation should be retried in the next cycle.
            if (commitCalled.get()) {
                commitStreamTruncationFuture.complete(null);
            } else {
                commitCalled.set(true);
                return CompletableFuture.completedFuture(false);
            }
        } else if (argument.equals(config.getAbortStreamName())) {
            abortStreamTruncationFuture.complete(null);
        } else if (argument.equals(config.getKvtStreamName())) {
            kvtStreamTruncationFuture.complete(null);
        }
        return CompletableFuture.completedFuture(true);
    }).when(streamMetadataTasks).startTruncation(anyString(), anyString(), any(), any());
    Set<String> processes = Sets.newHashSet("p1", "p2", "p3");
    // first throw checkpoint store exception
    AtomicBoolean signal = new AtomicBoolean(false);
    CountDownLatch cd = new CountDownLatch(4);
    doAnswer(x -> {
        // this ensures that the call to truncate has been invoked for all 4 internal streams.
        cd.countDown();
        cd.await();
        if (!signal.get()) {
            throw new CheckpointStoreException("CheckpointStoreException");
        } else {
            return processes;
        }
    }).when(checkpointStore).getProcesses();
    Map<String, PositionImpl> r1 = Collections.singletonMap("r1", position1);
    doReturn(r1).when(checkpointStore).getPositions(eq("p1"), anyString());
    Map<String, PositionImpl> r2 = Collections.singletonMap("r2", position1);
    doReturn(r2).when(checkpointStore).getPositions(eq("p2"), anyString());
    Map<String, PositionImpl> r3 = Collections.singletonMap("r3", position1);
    doReturn(r3).when(checkpointStore).getPositions(eq("p3"), anyString());
    @Cleanup
    ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
    // set truncation interval
    processors.setTruncationInterval(100L);
    processors.startAsync();
    processors.awaitRunning();
    ControllerEventProcessors processorsSpied = spy(processors);
    processorsSpied.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
    // wait until the induced checkpoint-store exception has been thrown for all 4 internal streams.
    cd.await();
    verify(processorsSpied, atLeast(4)).truncate(any(), any(), any());
    verify(checkpointStore, atLeast(4)).getProcesses();
    verify(checkpointStore, never()).getPositions(anyString(), anyString());
    verify(streamMetadataTasks, never()).startTruncation(anyString(), anyString(), any(), any());
    signal.set(true);
    CompletableFuture.allOf(requestStreamTruncationFuture, commitStreamTruncationFuture, abortStreamTruncationFuture, kvtStreamTruncationFuture).join();
    // verify that truncate method is being called periodically.
    verify(processorsSpied, atLeastOnce()).truncate(config.getRequestStreamName(), config.getRequestReaderGroupName(), streamMetadataTasks);
    verify(processorsSpied, atLeastOnce()).truncate(config.getCommitStreamName(), config.getCommitReaderGroupName(), streamMetadataTasks);
    verify(processorsSpied, atLeastOnce()).truncate(config.getAbortStreamName(), config.getAbortReaderGroupName(), streamMetadataTasks);
    verify(processorsSpied, atLeastOnce()).truncate(config.getKvtStreamName(), config.getKvtReaderGroupName(), streamMetadataTasks);
    for (int i = 1; i <= 3; i++) {
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getRequestReaderGroupName());
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getCommitReaderGroupName());
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getAbortReaderGroupName());
        verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getKvtReaderGroupName());
    }
}
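The test above relies on a reusable Mockito idiom: a doAnswer stub counts down a CountDownLatch so the test can block until the code under test has made a known number of calls, while an AtomicBoolean flips the stub from failing to succeeding. Below is a self-contained sketch of just that idiom, with illustrative names (Store, fetch, demo are all hypothetical).

import static org.mockito.Mockito.*;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

class LatchedStubSketch {
    // Illustrative collaborator standing in for CheckpointStore.
    interface Store {
        String fetch();
    }

    static void demo() throws InterruptedException {
        Store store = mock(Store.class);
        CountDownLatch invoked = new CountDownLatch(3);   // expect at least 3 attempts
        AtomicBoolean healthy = new AtomicBoolean(false); // flip to stop inducing failures

        doAnswer(invocation -> {
            invoked.countDown();                          // record this attempt
            if (!healthy.get()) {
                throw new IllegalStateException("induced failure");
            }
            return "ok";
        }).when(store).fetch();

        // ... the code under test retries store.fetch() on its own schedule ...

        invoked.await();                                  // block until 3 attempts happened
        verify(store, atLeast(3)).fetch();
        healthy.set(true);                                // let the next retry succeed
    }
}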
use of io.pravega.controller.task.KeyValueTable.TableMetadataTasks in project pravega by pravega.
the class ControllerEventProcessorsTest method testBootstrap.
@Test(timeout = 30000L)
public void testBootstrap() throws Exception {
    LocalController controller = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    doAnswer(x -> null).when(streamMetadataTasks).initializeStreamWriters(any(), any());
    doAnswer(x -> null).when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
    LinkedBlockingQueue<CompletableFuture<Boolean>> createScopeResponses = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<CompletableFuture<Void>> createScopeSignals = new LinkedBlockingQueue<>();
    List<CompletableFuture<Boolean>> createScopeResponsesList = new LinkedList<>();
    List<CompletableFuture<Void>> createScopeSignalsList = new LinkedList<>();
    for (int i = 0; i < 2; i++) {
        CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
        CompletableFuture<Void> signalFuture = new CompletableFuture<>();
        createScopeResponsesList.add(responseFuture);
        createScopeResponses.add(responseFuture);
        createScopeSignalsList.add(signalFuture);
        createScopeSignals.add(signalFuture);
    }
    // return a future from latches queue
    doAnswer(x -> {
        createScopeSignals.take().complete(null);
        return createScopeResponses.take();
    }).when(controller).createScope(anyString());
    LinkedBlockingQueue<CompletableFuture<Boolean>> createStreamResponses = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<CompletableFuture<Void>> createStreamSignals = new LinkedBlockingQueue<>();
    List<CompletableFuture<Boolean>> createStreamResponsesList = new LinkedList<>();
    List<CompletableFuture<Void>> createStreamSignalsList = new LinkedList<>();
    for (int i = 0; i < 8; i++) {
        CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
        CompletableFuture<Void> signalFuture = new CompletableFuture<>();
        createStreamResponsesList.add(responseFuture);
        createStreamResponses.add(responseFuture);
        createStreamSignalsList.add(signalFuture);
        createStreamSignals.add(signalFuture);
    }
    // return a future from latches queue
    doAnswer(x -> {
        createStreamSignals.take().complete(null);
        return createStreamResponses.take();
    }).when(controller).createInternalStream(anyString(), anyString(), any());
    @Cleanup
    ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
    // call bootstrap on ControllerEventProcessors
    processors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
    // wait on create scope being called.
    createScopeSignalsList.get(0).join();
    verify(controller, times(1)).createScope(any());
    // complete scopeFuture1 exceptionally. this should result in a retry.
    createScopeResponsesList.get(0).completeExceptionally(new RuntimeException());
    // wait on second scope signal being called
    createScopeSignalsList.get(1).join();
    verify(controller, times(2)).createScope(any());
    // so far no create stream should have been invoked
    verify(controller, times(0)).createInternalStream(anyString(), anyString(), any());
    // complete scopeFuture2 successfully
    createScopeResponsesList.get(1).complete(true);
    // create streams should be called now
    // since bootstrap creates four internal streams, wait on the first four signal futures
    createStreamSignalsList.get(0).join();
    createStreamSignalsList.get(1).join();
    createStreamSignalsList.get(2).join();
    createStreamSignalsList.get(3).join();
    verify(controller, times(4)).createInternalStream(anyString(), anyString(), any());
    // fail first four requests
    createStreamResponsesList.get(0).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(1).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(2).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(3).completeExceptionally(new RuntimeException());
    // this should result in a retry for four create streams. wait on next four signals
    createStreamSignalsList.get(4).join();
    createStreamSignalsList.get(5).join();
    createStreamSignalsList.get(6).join();
    createStreamSignalsList.get(7).join();
    verify(controller, times(8)).createInternalStream(anyString(), anyString(), any());
    // complete successfully
    createStreamResponsesList.get(4).complete(true);
    createStreamResponsesList.get(5).complete(true);
    createStreamResponsesList.get(6).complete(true);
    createStreamResponsesList.get(7).complete(true);
    AssertExtensions.assertEventuallyEquals(true, () -> processors.getBootstrapCompleted().get(), 10000);
}
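The bootstrap test's core trick pairs every expected call with two futures: a signal the stub completes when invoked (so the test can wait for the call to happen) and a response the test completes later (so it decides whether that call succeeds or triggers a retry). Below is a minimal self-contained sketch of the pattern, with illustrative names (AsyncClient, createResource, demo are all hypothetical).

import static org.mockito.Mockito.*;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;

class SignalResponseSketch {
    // Illustrative collaborator standing in for LocalController.
    interface AsyncClient {
        CompletableFuture<Boolean> createResource(String name);
    }

    static void demo() {
        AsyncClient client = mock(AsyncClient.class);
        LinkedBlockingQueue<CompletableFuture<Boolean>> responses = new LinkedBlockingQueue<>();
        LinkedBlockingQueue<CompletableFuture<Void>> signals = new LinkedBlockingQueue<>();
        CompletableFuture<Boolean> response1 = new CompletableFuture<>();
        CompletableFuture<Boolean> response2 = new CompletableFuture<>();
        CompletableFuture<Void> signal1 = new CompletableFuture<>();
        CompletableFuture<Void> signal2 = new CompletableFuture<>();
        responses.add(response1);
        responses.add(response2);
        signals.add(signal1);
        signals.add(signal2);

        doAnswer(invocation -> {
            signals.take().complete(null); // tell the test this call happened
            return responses.take();       // return a future the test controls
        }).when(client).createResource(anyString());

        // ... the code under test calls client.createResource(...) with retry-on-failure ...

        signal1.join();                                           // first attempt observed
        response1.completeExceptionally(new RuntimeException());  // force a retry
        signal2.join();                                           // retry observed
        response2.complete(true);                                 // let it succeed
        verify(client, times(2)).createResource(anyString());
    }
}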