Use of com.sequenceiq.flow.reactor.api.handler.HandlerEvent in the Cloudbreak project by Hortonworks: method getEvent of class SdxCmSyncWaitHandlerTest.
/**
 * Builds a {@link HandlerEvent} wrapping a mocked {@link SdxCmSyncWaitEvent}
 * whose resource id is stubbed to return {@code SDX_ID}.
 */
private HandlerEvent<SdxCmSyncWaitEvent> getEvent() {
    SdxCmSyncWaitEvent payload = mock(SdxCmSyncWaitEvent.class);
    when(payload.getResourceId()).thenReturn(SDX_ID);
    return new HandlerEvent<>(new Event<>(payload));
}
Use of com.sequenceiq.flow.reactor.api.handler.HandlerEvent in the Cloudbreak project by Hortonworks: method doAccept of class StopStartUpscaleCommissionViaCMHandler.
/**
 * Commissions (re)started hosts back into the cluster via Cloudera Manager.
 * <p>
 * Waits for the hosts to report healthy in CM, collects the subset of the requested
 * hosts that CM actually knows about, recommissions them, and returns a result that
 * carries both the recommissioned host names and any host names that could not be
 * recommissioned (missing from CM, or present but not recommissioned).
 *
 * @param event handler event carrying the {@link StopStartUpscaleCommissionViaCMRequest}
 * @return a {@link StopStartUpscaleCommissionViaCMResult} — a success result on the happy
 *         path, or a failure-flavored result wrapping the exception when anything throws
 */
@Override
protected Selectable doAccept(HandlerEvent<StopStartUpscaleCommissionViaCMRequest> event) {
    StopStartUpscaleCommissionViaCMRequest request = event.getData();
    LOGGER.info("StopStartUpscaleCommissionViaCMHandler for: {}, {}", event.getData().getResourceId(), event);
    LOGGER.debug("StartedInstancesToCommission: {}, servicesNotRunningInstancesToCommission: {}",
            request.getStartedInstancesToCommission(), request.getServicesNotRunningInstancesToCommission());

    // Both already-started and services-not-running instances are commissioned together.
    List<InstanceMetaData> allInstancesToCommission = new LinkedList<>();
    allInstancesToCommission.addAll(request.getStartedInstancesToCommission());
    allInstancesToCommission.addAll(request.getServicesNotRunningInstancesToCommission());

    try {
        Stack stack = stackService.getByIdWithLists(request.getResourceId());
        Cluster cluster = stack.getCluster();

        flowMessageService.fireEventAndLog(stack.getId(), UPDATE_IN_PROGRESS.name(),
                CLUSTER_SCALING_STOPSTART_UPSCALE_WAITING_HOSTSTART, String.valueOf(allInstancesToCommission.size()));

        ClusterSetupService clusterSetupService = clusterApiConnectors.getConnector(stack).clusterSetupService();
        clusterSetupService.waitForHostsHealthy(new HashSet<>(allInstancesToCommission));

        flowMessageService.fireEventAndLog(stack.getId(), UPDATE_IN_PROGRESS.name(),
                CLUSTER_SCALING_STOPSTART_UPSCALE_CMHOSTSSTARTED, String.valueOf(allInstancesToCommission.size()));

        ClusterCommissionService clusterCommissionService = clusterApiConnectors.getConnector(stack).clusterCommissionService();

        Set<String> hostNames = allInstancesToCommission.stream()
                .map(InstanceMetaData::getDiscoveryFQDN)
                .collect(Collectors.toSet());
        LOGGER.debug("HostNames to recommission: count={}, hostNames={}", hostNames.size(), hostNames);

        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(cluster.getId(), request.getHostGroupName())
                .orElseThrow(NotFoundException.notFound("hostgroup", request.getHostGroupName()));

        Map<String, InstanceMetaData> hostsToRecommission = clusterCommissionService.collectHostsToCommission(hostGroup, hostNames);

        // Hosts requested for commission that CM does not know about.
        List<String> missingHostsInCm = Collections.emptyList();
        if (hostNames.size() != hostsToRecommission.size()) {
            missingHostsInCm = hostNames.stream()
                    .filter(h -> !hostsToRecommission.containsKey(h))
                    .collect(Collectors.toList());
            LOGGER.info("Found fewer instances in CM to commission, as compared to initial ask. foundCount={}, initialCount={}, missingHostsInCm={}",
                    hostsToRecommission.size(), hostNames.size(), missingHostsInCm);
        }

        // TODO CB-15132: Eventually ensure CM, relevant services (YARN RM) are in a functional state - or fail/delay the operation
        // TODO CB-15132: Potentially poll nodes for success. Don't fail the entire operation if a single node fails to commission.
        //  What would need to happen to the CM command in this case? (Can only work in the presence of a co-operative CM API call.
        //  Alternately this could go straight to the service)
        Set<String> recommissionedHostnames = Collections.emptySet();
        if (!hostsToRecommission.isEmpty()) {
            recommissionedHostnames = clusterCommissionService.recommissionClusterNodes(hostsToRecommission);
            // TODO CB-15132: Maybe wait for services to start / force CM sync.
        }

        // Stays null (not empty) when nothing is missing — preserved deliberately, since the
        // result object is constructed with this value and downstream code may null-check it.
        List<String> allMissingRecommissionHostnames = null;
        if (!missingHostsInCm.isEmpty()) {
            allMissingRecommissionHostnames = new LinkedList<>(missingHostsInCm);
        }
        if (hostsToRecommission.size() != recommissionedHostnames.size()) {
            // Effectively-final copy so the lambda below can capture it.
            Set<String> finalRecommissionedHostnames = recommissionedHostnames;
            List<String> additionalMissingRecommissionHostnames = hostsToRecommission.keySet().stream()
                    .filter(h -> !finalRecommissionedHostnames.contains(h))
                    .collect(Collectors.toList());
            LOGGER.info("Recommissioned fewer instances than requested. recommissionedCount={}, expectedCount={}, initialCount={}, notRecommissioned=[{}]",
                    recommissionedHostnames.size(), hostsToRecommission.size(), hostNames.size(), additionalMissingRecommissionHostnames);
            if (allMissingRecommissionHostnames == null) {
                allMissingRecommissionHostnames = new LinkedList<>();
            }
            allMissingRecommissionHostnames.addAll(additionalMissingRecommissionHostnames);
        }
        return new StopStartUpscaleCommissionViaCMResult(request, recommissionedHostnames, allMissingRecommissionHostnames);
    } catch (Exception e) {
        // TODO CB-15132: This can be improved based on where and when the Exception occurred to potentially rollback certain aspects.
        //  ClusterClientInitException is one which is explicitly thrown.
        String message = "Failed while attempting to commission nodes via CM";
        // Fix: log the throwable as well — the original LOGGER.error(message) dropped the
        // exception entirely, losing the stack trace from the server logs.
        LOGGER.error(message, e);
        return new StopStartUpscaleCommissionViaCMResult(message, e, request);
    }
}
Use of com.sequenceiq.flow.reactor.api.handler.HandlerEvent in the Cloudbreak project by Hortonworks: method testExecuteOperation of class SendConsumptionEventHandlerTest.
/** Verifies that doAccept forwards crn/id and emits the collection-finish selector. */
@Test
public void testExecuteOperation() {
    Long consumptionId = 1L;
    String consumptionCrn = "consumptionCrn";
    StorageConsumptionCollectionHandlerEvent handlerEvent = new StorageConsumptionCollectionHandlerEvent(
            SEND_CONSUMPTION_EVENT_HANDLER.selector(), consumptionId, consumptionCrn, null);

    StorageConsumptionCollectionEvent result =
            (StorageConsumptionCollectionEvent) underTest.doAccept(new HandlerEvent<>(new Event<>(handlerEvent)));

    assertEquals(consumptionCrn, result.getResourceCrn());
    assertEquals(consumptionId, result.getResourceId());
    assertEquals(STORAGE_CONSUMPTION_COLLECTION_FINISH_EVENT.selector(), result.selector());
}
Use of com.sequenceiq.flow.reactor.api.handler.HandlerEvent in the Cloudbreak project by Hortonworks: method testExecuteOperation of class StorageConsumptionCollectionHandlerTest.
/**
 * Verifies that doAccept resolves and converts the environment credential, then
 * emits a send-consumption event carrying the original crn and id.
 */
@Test
public void testExecuteOperation() {
    Long consumptionId = 1L;
    String consumptionCrn = "consumptionCrn";
    String environmentCrn = "envCrn";

    Consumption consumption = new Consumption();
    consumption.setId(consumptionId);
    consumption.setResourceCrn(consumptionCrn);
    consumption.setEnvironmentCrn(environmentCrn);
    ConsumptionContext context = new ConsumptionContext(null, consumption);
    StorageConsumptionCollectionHandlerEvent handlerEvent = new StorageConsumptionCollectionHandlerEvent(
            STORAGE_CONSUMPTION_COLLECTION_HANDLER.selector(), consumptionId, consumptionCrn, context);

    when(credentialService.getCredentialByEnvCrn(environmentCrn)).thenReturn(credential);
    when(credentialConverter.convert(credential)).thenReturn(new CloudCredential());

    StorageConsumptionCollectionEvent result =
            (StorageConsumptionCollectionEvent) underTest.doAccept(new HandlerEvent<>(new Event<>(handlerEvent)));

    verify(credentialService).getCredentialByEnvCrn(environmentCrn);
    verify(credentialConverter).convert(credential);
    assertEquals(consumptionCrn, result.getResourceCrn());
    assertEquals(consumptionId, result.getResourceId());
    assertEquals(SEND_CONSUMPTION_EVENT_EVENT.selector(), result.selector());
}
Use of com.sequenceiq.flow.reactor.api.handler.HandlerEvent in the Cloudbreak project by Hortonworks: method testOrchestratorThrowException of class CreateFullBackupHandlerTest.
/**
 * Verifies that an orchestrator failure during full backup is translated into a
 * StackFailureEvent carrying the failed selector and the original exception.
 */
@Test
public void testOrchestratorThrowException() throws CloudbreakOrchestratorFailedException {
    Stack stack = mock(Stack.class);
    Set<InstanceMetaData> instanceMetaData = Set.of();
    Set<Node> nodes = Set.of(createNode("node1"));
    GatewayConfig gatewayConfig = mock(GatewayConfig.class);

    when(stackService.getByIdWithListsInTransaction(2L)).thenReturn(stack);
    when(stack.getNotDeletedInstanceMetaDataSet()).thenReturn(instanceMetaData);
    when(nodeService.mapInstancesToNodes(instanceMetaData)).thenReturn(nodes);
    when(gatewayConfigService.getPrimaryGatewayConfig(stack)).thenReturn(gatewayConfig);
    doThrow(new CloudbreakOrchestratorFailedException("tada"))
            .when(orchestrator).runOrchestratorState(any(OrchestratorStateParams.class));

    StackFailureEvent result = (StackFailureEvent) underTest.doAccept(
            new HandlerEvent<>(new Event<>(new CreateFullBackupEvent(2L))));

    assertEquals(2L, result.getResourceId());
    assertEquals(FullBackupEvent.FULL_BACKUP_FAILED_EVENT.event(), result.selector());
    assertEquals("tada", result.getException().getMessage());
}
Aggregations