Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
From the class StreamCutServiceTest, method setup:
@Before
public void setup() throws Exception {
    executor = Executors.newScheduledThreadPool(10);
    hostId = UUID.randomUUID().toString();
    // Stream metadata store with 3 buckets; in-memory task and host stores keep the test self-contained.
    streamMetadataStore = createStore(3, executor);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor);
    HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    streamMetadataTasks = new StreamMetadataTasks(streamMetadataStore, hostStore, taskMetadataStore, segmentHelper,
            executor, hostId, connectionFactory, false, "");
    // Start the StreamCutService with 3 buckets and block until it is running.
    service = new StreamCutService(3, hostId, streamMetadataStore, streamMetadataTasks, executor);
    service.startAsync();
    service.awaitRunning();
}
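A matching @After method would release the resources acquired in setup. A minimal sketch, assuming the fields shown above and mirroring the cleanup order used in the ZkStoreRetentionTest snippet below:

@After
public void tearDown() throws Exception {
    // Stop the service before closing the components it depends on.
    service.stopAsync();
    service.awaitTerminated();
    streamMetadataTasks.close();
    connectionFactory.close();
    executor.shutdown();
}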
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
From the class ZkStoreRetentionTest, method testOwnershipOfExistingBucket:
@Test(timeout = 10000)
public void testOwnershipOfExistingBucket() throws Exception {
    TestingServer zkServer2 = new TestingServerStarter().start();
    zkServer2.start();
    CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer2.getConnectString(), 10000, 1000, (r, e, s) -> false);
    zkClient2.start();
    ScheduledExecutorService executor2 = Executors.newScheduledThreadPool(10);
    String hostId = UUID.randomUUID().toString();
    // ZK-backed stream metadata store configured with a single bucket.
    StreamMetadataStore streamMetadataStore2 = StreamStoreFactory.createZKStore(zkClient2, 1, executor2);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor2);
    HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    ConnectionFactoryImpl connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamMetadataStore2, hostStore, taskMetadataStore,
            segmentHelper, executor2, hostId, connectionFactory, false, "");
    // Register two streams for automatic stream-cut (retention) processing before the service starts.
    String scope = "scope1";
    String streamName = "stream1";
    streamMetadataStore2.addUpdateStreamForAutoStreamCut(scope, streamName, RetentionPolicy.builder().build(), null, executor2).join();
    String scope2 = "scope2";
    String streamName2 = "stream2";
    streamMetadataStore2.addUpdateStreamForAutoStreamCut(scope2, streamName2, RetentionPolicy.builder().build(), null, executor2).join();
    StreamCutService service2 = new StreamCutService(1, hostId, streamMetadataStore2, streamMetadataTasks2, executor2);
    service2.startAsync();
    service2.awaitRunning();
    // With a single bucket, the service must take ownership of both pre-existing streams.
    assertTrue(service2.getBuckets().stream().allMatch(x -> x.getRetentionFutureMap().size() == 2));
    service2.stopAsync();
    service2.awaitTerminated();
    zkClient2.close();
    zkServer2.close();
    streamMetadataTasks2.close();
    connectionFactory.close();
    executor2.shutdown();
}
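The assertion holds because both registered streams map to the only available bucket. A sketch of the idea, assuming a modulo-style hash assignment (the method name and exact hashing are assumptions, not the store's actual code):

// With bucketCount == 1, every scoped stream maps to bucket 0, so the single
// bucket owned by service2 ends up holding retention futures for both streams.
int bucketFor(String scope, String stream, int bucketCount) {
    String scopedStreamName = scope + "/" + stream;
    return (scopedStreamName.hashCode() & Integer.MAX_VALUE) % bucketCount;
}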
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
From the class ControllerServiceStarter, method startUp:
@Override
protected void startUp() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    log.info("Initiating controller service startUp");
    log.info("Controller serviceConfig = {}", serviceConfig.toString());
    log.info("Event processors enabled = {}", serviceConfig.getEventProcessorConfig().isPresent());
    log.info("Cluster listener enabled = {}", serviceConfig.isControllerClusterListenerEnabled());
    log.info(" Host monitor enabled = {}", serviceConfig.getHostMonitorConfig().isHostMonitorEnabled());
    log.info(" gRPC server enabled = {}", serviceConfig.getGRPCServerConfig().isPresent());
    log.info(" REST server enabled = {}", serviceConfig.getRestServerConfig().isPresent());
    final BucketStore bucketStore;
    final TaskMetadataStore taskMetadataStore;
    final HostControllerStore hostStore;
    final CheckpointStore checkpointStore;
    try {
        // Initialize the executor services.
        controllerExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "controllerpool");
        eventExecutor = ExecutorServiceHelpers.newScheduledThreadPool(serviceConfig.getThreadPoolSize(), "eventprocessor");
        retentionExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.RETENTION_THREAD_POOL_SIZE, "retentionpool");
        watermarkingExecutor = ExecutorServiceHelpers.newScheduledThreadPool(Config.WATERMARKING_THREAD_POOL_SIZE, "watermarkingpool");
        bucketStore = StreamStoreFactory.createBucketStore(storeClient, controllerExecutor);
        log.info("Created the bucket store.");
        taskMetadataStore = TaskStoreFactory.createStore(storeClient, controllerExecutor);
        log.info("Created the task store.");
        hostStore = HostStoreFactory.createStore(serviceConfig.getHostMonitorConfig(), storeClient);
        log.info("Created the host store.");
        checkpointStore = CheckpointStoreFactory.create(storeClient);
        log.info("Created the checkpoint store.");
        // Initialize Stream and Transaction metrics.
        StreamMetrics.initialize();
        TransactionMetrics.initialize();
        // On each controller process restart, we use a fresh hostId,
        // which is a combination of the hostname and a random GUID.
        String hostName = getHostName();
        Host host = new Host(hostName, getPort(), UUID.randomUUID().toString());
        // Create a RequestTracker instance to trace client requests end-to-end.
        GRPCServerConfig grpcServerConfig = serviceConfig.getGRPCServerConfig().get();
        RequestTracker requestTracker = new RequestTracker(grpcServerConfig.isRequestTracingEnabled());
        // Create a HealthServiceManager instance.
        healthServiceManager = new HealthServiceManager(serviceConfig.getHealthCheckFrequency());
        if (serviceConfig.getHostMonitorConfig().isHostMonitorEnabled()) {
            // Start the Segment Container Monitor.
            monitor = new SegmentContainerMonitor(hostStore, (CuratorFramework) storeClient.getClient(),
                    new UniformContainerBalancer(), serviceConfig.getHostMonitorConfig().getHostMonitorMinRebalanceInterval());
            monitor.startAsync();
            log.info("Started Segment Container Monitor service.");
            SegmentContainerMonitorHealthContributor segmentContainerMonitorHC = new SegmentContainerMonitorHealthContributor("segmentContainerMonitor", monitor);
            healthServiceManager.register(segmentContainerMonitorHC);
        }
        // This client config is used by the segment store helper (SegmentHelper) to connect to the segment store.
        ClientConfig.ClientConfigBuilder clientConfigBuilder = ClientConfig.builder()
                .controllerURI(URI.create((grpcServerConfig.isTlsEnabled() ? "tls://" : "tcp://") + "localhost:" + grpcServerConfig.getPort()))
                .trustStore(grpcServerConfig.getTlsTrustStore())
                .validateHostName(false);
        Optional<Boolean> tlsEnabledForSegmentStore = BooleanUtils.extract(serviceConfig.getTlsEnabledForSegmentStore());
        if (tlsEnabledForSegmentStore.isPresent()) {
            clientConfigBuilder.enableTlsToSegmentStore(tlsEnabledForSegmentStore.get());
        }
        // Use one connection per Segment Store to conserve resources.
        ClientConfig clientConfig = clientConfigBuilder.maxConnectionsPerSegmentStore(1).build();
        connectionFactory = connectionFactoryRef.orElseGet(() -> new SocketConnectionFactoryImpl(clientConfig));
        connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
        segmentHelper = segmentHelperRef.orElseGet(() -> new SegmentHelper(connectionPool, hostStore, controllerExecutor));
        GrpcAuthHelper authHelper = new GrpcAuthHelper(serviceConfig.getGRPCServerConfig().get().isAuthorizationEnabled(),
                grpcServerConfig.getTokenSigningKey(), grpcServerConfig.getAccessTokenTTLInSeconds());
        streamStore = streamMetadataStoreRef.orElseGet(() -> StreamStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor));
        log.info("Created the stream store.");
        streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
                controllerExecutor, eventExecutor, host.getHostId(), authHelper, serviceConfig.getRetentionFrequency().toMillis());
        streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, controllerExecutor,
                eventExecutor, host.getHostId(), serviceConfig.getTimeoutServiceConfig(), authHelper);
        BucketServiceFactory bucketServiceFactory = new BucketServiceFactory(host.getHostId(), bucketStore, 1000);
        // Start the periodic retention service.
        Duration executionDurationRetention = serviceConfig.getRetentionFrequency();
        PeriodicRetention retentionWork = new PeriodicRetention(streamStore, streamMetadataTasks, retentionExecutor, requestTracker);
        retentionService = bucketServiceFactory.createRetentionService(executionDurationRetention, retentionWork::retention, retentionExecutor);
        retentionService.startAsync();
        retentionService.awaitRunning();
        log.info("Started background periodic service for Retention.");
        RetentionServiceHealthContributor retentionServiceHC = new RetentionServiceHealthContributor("retentionService", retentionService);
        healthServiceManager.register(retentionServiceHC);
        // Start the periodic watermarking service.
        Duration executionDurationWatermarking = Duration.ofSeconds(Config.MINIMUM_WATERMARKING_FREQUENCY_IN_SECONDS);
        watermarkingWork = new PeriodicWatermarking(streamStore, bucketStore, clientConfig, watermarkingExecutor, requestTracker);
        watermarkingService = bucketServiceFactory.createWatermarkingService(executionDurationWatermarking, watermarkingWork::watermark, watermarkingExecutor);
        watermarkingService.startAsync();
        watermarkingService.awaitRunning();
        log.info("Started background periodic service for Watermarking.");
        WatermarkingServiceHealthContributor watermarkingServiceHC = new WatermarkingServiceHealthContributor("watermarkingService", watermarkingService);
        healthServiceManager.register(watermarkingServiceHC);
        // The controller tracks the currently active controller host instances. On detecting a failure of
        // any controller instance, the failure detector stores the failed hostId in a failed-hosts directory (FH) and
        // invokes taskSweeper.sweepOrphanedTasks for each failed host. When all resources under the failed hostId
        // have been processed and deleted, that hostId is removed from the FH directory.
        // Moreover, on controller process startup, it detects any hostIds not in the currently active set of
        // controllers and starts sweeping tasks orphaned by those hostIds.
        TaskSweeper taskSweeper = new TaskSweeper(taskMetadataStore, host.getHostId(), controllerExecutor, streamMetadataTasks);
        TxnSweeper txnSweeper = new TxnSweeper(streamStore, streamTransactionMetadataTasks, serviceConfig.getTimeoutServiceConfig().getMaxLeaseValue(), controllerExecutor);
        RequestSweeper requestSweeper = new RequestSweeper(streamStore, controllerExecutor, streamMetadataTasks);
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            cluster = new ClusterZKImpl((CuratorFramework) storeClient.getClient(), ClusterType.CONTROLLER);
        }
        kvtMetadataStore = kvtMetaStoreRef.orElseGet(() -> KVTableStoreFactory.createStore(storeClient, segmentHelper, authHelper, controllerExecutor, streamStore));
        kvtMetadataTasks = new TableMetadataTasks(kvtMetadataStore, segmentHelper, controllerExecutor, eventExecutor, host.getHostId(), authHelper);
        controllerService = new ControllerService(kvtMetadataStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
                streamTransactionMetadataTasks, segmentHelper, controllerExecutor, cluster, requestTracker);
        // Set up event processors.
        setController(new LocalController(controllerService, grpcServerConfig.isAuthorizationEnabled(), grpcServerConfig.getTokenSigningKey()));
        CompletableFuture<Void> eventProcessorFuture = CompletableFuture.completedFuture(null);
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // Create the ControllerEventProcessors object.
            controllerEventProcessors = new ControllerEventProcessors(host.getHostId(), serviceConfig.getEventProcessorConfig().get(),
                    localController, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks,
                    streamTransactionMetadataTasks, kvtMetadataStore, kvtMetadataTasks, eventExecutor);
            // Bootstrap and start it asynchronously.
            eventProcessorFuture = controllerEventProcessors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtMetadataTasks)
                    .thenAcceptAsync(x -> controllerEventProcessors.startAsync(), eventExecutor);
            EventProcessorHealthContributor eventProcessorHC = new EventProcessorHealthContributor("eventProcessor", controllerEventProcessors);
            healthServiceManager.register(eventProcessorHC);
        }
        // Set up and start the controller cluster listener after all sweepers have been initialized.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            List<FailoverSweeper> failoverSweepers = new ArrayList<>();
            failoverSweepers.add(taskSweeper);
            failoverSweepers.add(txnSweeper);
            failoverSweepers.add(requestSweeper);
            if (serviceConfig.getEventProcessorConfig().isPresent()) {
                assert controllerEventProcessors != null;
                failoverSweepers.add(controllerEventProcessors);
            }
            controllerClusterListener = new ControllerClusterListener(host, cluster, controllerExecutor, failoverSweepers);
            controllerClusterListener.startAsync();
            ClusterListenerHealthContributor clusterListenerHC = new ClusterListenerHealthContributor("clusterListener", controllerClusterListener);
            healthServiceManager.register(clusterListenerHC);
        }
        // Start the Health Service.
        healthServiceManager.start();
        // Start the gRPC server.
        if (serviceConfig.getGRPCServerConfig().isPresent()) {
            grpcServer = new GRPCServer(controllerService, grpcServerConfig, requestTracker);
            grpcServer.startAsync();
            grpcServer.awaitRunning();
            GRPCServerHealthContributor grpcServerHC = new GRPCServerHealthContributor("GRPCServer", grpcServer);
            healthServiceManager.register(grpcServerHC);
        }
        // Start the REST server.
        if (serviceConfig.getRestServerConfig().isPresent()) {
            List<Object> resources = new ArrayList<>();
            resources.add(new StreamMetadataResourceImpl(this.localController, controllerService, grpcServer.getAuthHandlerManager(), connectionFactory, clientConfig));
            resources.add(new HealthImpl(grpcServer.getAuthHandlerManager(), healthServiceManager.getEndpoint()));
            resources.add(new PingImpl());
            MetricsProvider.getMetricsProvider().prometheusResource().ifPresent(resources::add);
            restServer = new RESTServer(serviceConfig.getRestServerConfig().get(), Set.copyOf(resources));
            restServer.startAsync();
            restServer.awaitRunning();
        }
        // Wait for controller event processors to start.
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // If the store client has failed because of session expiration, there are two cases in which
            // controllerEventProcessors.awaitRunning may be stuck forever:
            // 1. stream creation is retried indefinitely and cannot complete because of ZK session expiration;
            // 2. the event writer throws an exception after stream creation.
            // In both of the above cases controllerEventProcessors.startAsync may never get called.
            CompletableFuture.anyOf(storeClientFailureFuture,
                    eventProcessorFuture.thenAccept(x -> controllerEventProcessors.awaitRunning())).join();
        }
        // Wait for the controller cluster listener to start.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            controllerClusterListener.awaitRunning();
        }
    } catch (Exception e) {
        log.error("Failed trying to start controller services", e);
        throw e;
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}
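startUp acquires resources in dependency order, so its counterpart releases them roughly in reverse. A simplified sketch of such a shutDown, assuming the fields created above (the real ControllerServiceStarter has its own shutDown with additional error handling; this body is illustrative, not the actual implementation):

@Override
protected void shutDown() {
    // Reverse of startUp: front-end servers first, then background services, then executors.
    if (restServer != null) {
        restServer.stopAsync();
        restServer.awaitTerminated();
    }
    if (grpcServer != null) {
        grpcServer.stopAsync();
        grpcServer.awaitTerminated();
    }
    if (controllerClusterListener != null) {
        controllerClusterListener.stopAsync();
        controllerClusterListener.awaitTerminated();
    }
    if (controllerEventProcessors != null) {
        controllerEventProcessors.stopAsync();
    }
    watermarkingService.stopAsync();
    retentionService.stopAsync();
    watermarkingService.awaitTerminated();
    retentionService.awaitTerminated();
    if (monitor != null) {
        monitor.stopAsync();
        monitor.awaitTerminated();
    }
    ExecutorServiceHelpers.shutdown(controllerExecutor, eventExecutor, retentionExecutor, watermarkingExecutor);
}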
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
From the class ControllerEventProcessorPravegaTablesStreamTest, method testTxnPartialCommitRetry:
@Test(timeout = 10000)
public void testTxnPartialCommitRetry() {
    PravegaTablesStoreHelper storeHelper = spy(new PravegaTablesStoreHelper(SegmentHelperMock.getSegmentHelperMockForTables(executor), GrpcAuthHelper.getDisabledAuthHelper(), executor));
    this.streamStore = new PravegaTablesStreamMetadataStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor, Duration.ofHours(Config.COMPLETED_TRANSACTION_TTL_IN_HOURS), storeHelper);
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    EventHelper eventHelperMock = EventHelperMock.getEventHelperMock(executor, "1", ((AbstractStreamMetadataStore) this.streamStore).getHostTaskIndex());
    StreamMetadataTasks streamMetadataTasks = new StreamMetadataTasks(streamStore, this.bucketStore, TaskStoreFactory.createInMemoryStore(executor),
            segmentHelperMock, executor, "1", GrpcAuthHelper.getDisabledAuthHelper(), eventHelperMock);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(this.streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    String scope = "scope";
    String stream = "stream";
    // region createStream
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    streamStore.createScope(scope, null, executor).join();
    long start = System.currentTimeMillis();
    streamStore.createStream(scope, stream, configuration1, start, null, executor).join();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).join();
    // endregion
    StreamMetadataTasks spyStreamMetadataTasks = spy(streamMetadataTasks);
    List<VersionedTransactionData> txnDataList = createAndCommitTransactions(3);
    int epoch = txnDataList.get(0).getEpoch();
    spyStreamMetadataTasks.setRequestEventWriter(new EventStreamWriterMock<>());
    CommitRequestHandler commitEventProcessor = new CommitRequestHandler(streamStore, spyStreamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    final String committingTxnsRecordKey = "committingTxns";
    long failingClientRequestId = 123L;
    doReturn(failingClientRequestId).when(spyStreamMetadataTasks).getRequestId(any());
    OperationContext context = this.streamStore.createStreamContext(scope, stream, failingClientRequestId);
    streamStore.startCommitTransactions(scope, stream, 100, context, executor).join();
    // Fail the update of the committing-transactions record, but only for the failing request id.
    doReturn(Futures.failedFuture(new RuntimeException())).when(storeHelper).updateEntry(anyString(), eq(committingTxnsRecordKey), any(),
            ArgumentMatchers.<Function<String, byte[]>>any(), any(), eq(failingClientRequestId));
    AssertExtensions.assertFutureThrows("Updating CommittingTxnRecord fails",
            commitEventProcessor.processEvent(new CommitEvent(scope, stream, epoch)),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(storeHelper, times(1)).removeEntries(anyString(), any(), eq(failingClientRequestId));
    // The transactions were committed, but the commit record could not be cleaned up.
    VersionedMetadata<CommittingTransactionsRecord> versionedCommitRecord = this.streamStore.getVersionedCommittingTransactionsRecord(scope, stream, context, executor).join();
    CommittingTransactionsRecord commitRecord = versionedCommitRecord.getObject();
    assertFalse(CommittingTransactionsRecord.EMPTY.equals(commitRecord));
    for (VersionedTransactionData txnData : txnDataList) {
        checkTransactionState(scope, stream, txnData.getId(), TxnStatus.COMMITTED);
    }
    // Retry with a request id that is not stubbed to fail; the commit record should now be cleaned up.
    long goodClientRequestId = 4567L;
    doReturn(goodClientRequestId).when(spyStreamMetadataTasks).getRequestId(any());
    commitEventProcessor.processEvent(new CommitEvent(scope, stream, epoch)).join();
    versionedCommitRecord = this.streamStore.getVersionedCommittingTransactionsRecord(scope, stream, context, executor).join();
    commitRecord = versionedCommitRecord.getObject();
    assertTrue(CommittingTransactionsRecord.EMPTY.equals(commitRecord));
    for (VersionedTransactionData txnData : txnDataList) {
        checkTransactionState(scope, stream, txnData.getId(), TxnStatus.COMMITTED);
    }
}
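The test calls two helpers that are not part of this excerpt. createAndCommitTransactions is left elided, but checkTransactionState can be sketched from its usage. A plausible body, assuming a transactionStatus lookup on the store (the helper's actual implementation may differ):

private void checkTransactionState(String scope, String stream, UUID txnId, TxnStatus expectedStatus) {
    // Look up the transaction's current state in the metadata store and compare.
    TxnStatus actual = streamStore.transactionStatus(scope, stream, txnId, null, executor).join();
    assertEquals(expectedStatus, actual);
}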
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
From the class ControllerEventProcessorsTest, method testBootstrap:
@Test(timeout = 30000L)
public void testBootstrap() throws Exception {
    LocalController controller = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    doAnswer(x -> null).when(streamMetadataTasks).initializeStreamWriters(any(), any());
    doAnswer(x -> null).when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
    // Queues of per-call signal and response futures for createScope.
    LinkedBlockingQueue<CompletableFuture<Boolean>> createScopeResponses = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<CompletableFuture<Void>> createScopeSignals = new LinkedBlockingQueue<>();
    List<CompletableFuture<Boolean>> createScopeResponsesList = new LinkedList<>();
    List<CompletableFuture<Void>> createScopeSignalsList = new LinkedList<>();
    for (int i = 0; i < 2; i++) {
        CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
        CompletableFuture<Void> signalFuture = new CompletableFuture<>();
        createScopeResponsesList.add(responseFuture);
        createScopeResponses.add(responseFuture);
        createScopeSignalsList.add(signalFuture);
        createScopeSignals.add(signalFuture);
    }
    // On each createScope call: complete the next signal, then return the next queued response.
    doAnswer(x -> {
        createScopeSignals.take().complete(null);
        return createScopeResponses.take();
    }).when(controller).createScope(anyString());
    // Queues of per-call signal and response futures for createInternalStream.
    LinkedBlockingQueue<CompletableFuture<Boolean>> createStreamResponses = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<CompletableFuture<Void>> createStreamSignals = new LinkedBlockingQueue<>();
    List<CompletableFuture<Boolean>> createStreamResponsesList = new LinkedList<>();
    List<CompletableFuture<Void>> createStreamSignalsList = new LinkedList<>();
    for (int i = 0; i < 8; i++) {
        CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
        CompletableFuture<Void> signalFuture = new CompletableFuture<>();
        createStreamResponsesList.add(responseFuture);
        createStreamResponses.add(responseFuture);
        createStreamSignalsList.add(signalFuture);
        createStreamSignals.add(signalFuture);
    }
    // On each createInternalStream call: complete the next signal, then return the next queued response.
    doAnswer(x -> {
        createStreamSignals.take().complete(null);
        return createStreamResponses.take();
    }).when(controller).createInternalStream(anyString(), anyString(), any());
    @Cleanup
    ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore,
            bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
    // Call bootstrap on ControllerEventProcessors.
    processors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
    // Wait on createScope being called.
    createScopeSignalsList.get(0).join();
    verify(controller, times(1)).createScope(any());
    // Complete the first scope future exceptionally; this should result in a retry.
    createScopeResponsesList.get(0).completeExceptionally(new RuntimeException());
    // Wait on the second scope signal being completed.
    createScopeSignalsList.get(1).join();
    verify(controller, times(2)).createScope(any());
    // So far no create stream should have been invoked.
    verify(controller, times(0)).createInternalStream(anyString(), anyString(), any());
    // Complete the second scope future successfully.
    createScopeResponsesList.get(1).complete(true);
    // Create streams should be called now. Since bootstrap issues four create-stream calls,
    // wait on the first four signal futures.
    createStreamSignalsList.get(0).join();
    createStreamSignalsList.get(1).join();
    createStreamSignalsList.get(2).join();
    createStreamSignalsList.get(3).join();
    verify(controller, times(4)).createInternalStream(anyString(), anyString(), any());
    // Fail the first four requests.
    createStreamResponsesList.get(0).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(1).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(2).completeExceptionally(new RuntimeException());
    createStreamResponsesList.get(3).completeExceptionally(new RuntimeException());
    // This should result in a retry of all four create streams; wait on the next four signals.
    createStreamSignalsList.get(4).join();
    createStreamSignalsList.get(5).join();
    createStreamSignalsList.get(6).join();
    createStreamSignalsList.get(7).join();
    verify(controller, times(8)).createInternalStream(anyString(), anyString(), any());
    // Complete the retried requests successfully.
    createStreamResponsesList.get(4).complete(true);
    createStreamResponsesList.get(5).complete(true);
    createStreamResponsesList.get(6).complete(true);
    createStreamResponsesList.get(7).complete(true);
    AssertExtensions.assertEventuallyEquals(true, () -> processors.getBootstrapCompleted().get(), 10000);
}
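The paired signal/response queues above form a reusable pattern for deterministically stepping through retries of a mocked asynchronous call. Condensed to its core (the queue names here are illustrative):

// Each stubbed invocation first completes the next "signal" future, letting the
// test observe that call N has happened, then returns the next pre-queued
// response future, which the test completes (or fails) whenever it chooses.
LinkedBlockingQueue<CompletableFuture<Void>> signals = new LinkedBlockingQueue<>();
LinkedBlockingQueue<CompletableFuture<Boolean>> responses = new LinkedBlockingQueue<>();
doAnswer(invocation -> {
    signals.take().complete(null);
    return responses.take();
}).when(controller).createScope(anyString());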