Usage of com.sequenceiq.cloudbreak.cluster.api.ClusterApi in the project cloudbreak by Hortonworks.
Example: the getRemoteDataContext method of the ClusterUpgradeHandler class.
/**
 * Fetches the Remote Data Context (SDX context) from the Data Lake cluster that
 * backs the given Data Hub stack.
 *
 * @param stack the stack being upgraded; only non-Data-Lake stacks with a non-empty
 *              Data Lake CRN trigger a fetch
 * @return the serialized remote data context, or {@link Optional#empty()} when the
 *         stack is itself a Data Lake, has no Data Lake CRN, or the connector
 *         returned no context
 */
private Optional<String> getRemoteDataContext(Stack stack) {
    Optional<String> remoteDataContext = Optional.empty();
    if (!stack.isDatalake() && StringUtils.isNotEmpty(stack.getDatalakeCrn())) {
        Stack datalake = stackService.getByCrn(stack.getDatalakeCrn());
        // Fixed: log the Data Lake we fetch from; the original logged the Data Hub's
        // own name, which contradicted the message text.
        LOGGER.info("Fetch the Remote Data Context from {} to update the Data Hub", datalake.getName());
        ClusterApi datalakeConnector = clusterApiConnectors.getConnector(datalake);
        // ofNullable instead of of: a null SDX context should yield empty(), not an NPE.
        remoteDataContext = Optional.ofNullable(datalakeConnector.getSdxContext());
    }
    return remoteDataContext;
}
Usage of com.sequenceiq.cloudbreak.cluster.api.ClusterApi in the project cloudbreak by Hortonworks.
Example: the testInstallServicesOnNewHostWithRestartButThereIsAnUnhealthyNode method of the ClusterUpscaleServiceTest class.
@Test
public void testInstallServicesOnNewHostWithRestartButThereIsAnUnhealthyNode() throws CloudbreakException {
    // Arrange: a stack with an attached cluster, returned by the stack service lookup.
    Stack stack = new Stack();
    stack.setId(1L);
    Cluster cluster = new Cluster();
    cluster.setId(2L);
    stack.setCluster(cluster);
    when(stackService.getByIdWithClusterInTransaction(eq(1L))).thenReturn(stack);

    ClusterApi clusterApi = mock(ClusterApi.class);
    when(clusterApiConnectors.getConnector(any(Stack.class))).thenReturn(clusterApi);

    // Two healthy instances and one that the provider deleted (the unhealthy node).
    InstanceMetaData healthyInstance1 = new InstanceMetaData();
    healthyInstance1.setInstanceStatus(InstanceStatus.SERVICES_HEALTHY);
    InstanceMetaData healthyInstance2 = new InstanceMetaData();
    healthyInstance2.setInstanceStatus(InstanceStatus.SERVICES_HEALTHY);
    InstanceMetaData deletedInstance = new InstanceMetaData();
    deletedInstance.setInstanceStatus(InstanceStatus.DELETED_BY_PROVIDER);

    InstanceGroup instanceGroup = new InstanceGroup();
    instanceGroup.setInstanceMetaData(Set.of(healthyInstance1, healthyInstance2, deletedInstance));
    stack.setInstanceGroups(Set.of(instanceGroup));

    HostGroup hostGroup = new HostGroup();
    hostGroup.setInstanceGroup(instanceGroup);
    when(hostGroupService.getByClusterWithRecipes(any())).thenReturn(Set.of(hostGroup));
    when(parcelService.removeUnusedParcelComponents(stack)).thenReturn(new ParcelOperationStatus(Collections.emptyMap(), Collections.emptyMap()));

    // Act: request install with restart enabled.
    underTest.installServicesOnNewHosts(1L, Set.of("master"), true, true);

    // Assert: the upscale still happens once, but the restart is skipped because
    // one node is unhealthy; parcel cleanup is performed regardless.
    verify(clusterApi, times(1)).upscaleCluster(any());
    verify(clusterApi, times(0)).restartAll(false);
    verify(parcelService).removeUnusedParcelComponents(stack);
}
Usage of com.sequenceiq.cloudbreak.cluster.api.ClusterApi in the project cloudbreak by Hortonworks.
Example: the doAccept method of the ClusterUpgradeExistingUpgradeCommandValidationHandler class.
/**
 * Validates whether an existing upgradeCDH command on Cluster Manager blocks a new
 * cluster upgrade. Validation passes immediately when no upgradeCDH command exists,
 * or when the existing one is neither active nor a retryable failure; otherwise the
 * existing command's runtime is compared against the target image's runtime.
 */
@Override
protected Selectable doAccept(HandlerEvent<ClusterUpgradeExistingUpgradeCommandValidationEvent> event) {
    LOGGER.debug("Accepting Cluster upgrade existing upgradeCDH command validation event.");
    ClusterUpgradeExistingUpgradeCommandValidationEvent request = event.getData();
    Image targetImage = request.getImage();
    Long stackId = request.getResourceId();
    Stack stack = getStack(stackId);
    ClusterApi connector = clusterApiConnectors.getConnector(stack);
    Optional<ClusterManagerCommand> optionalUpgradeCommand = connector.clusterStatusService().findCommand(stack, ClusterCommandType.UPGRADE_CLUSTER);
    // Guard clause: nothing to validate when CM has no upgradeCDH command at all.
    if (optionalUpgradeCommand.isEmpty()) {
        LOGGER.debug("There is no existing upgradeCDH command, validation passed successfully");
        return new ClusterUpgradeExistingUpgradeCommandValidationFinishedEvent(stackId);
    }
    ClusterManagerCommand upgradeCommand = optionalUpgradeCommand.get();
    boolean activeOrRetryableFailure = upgradeCommand.getActive() || (!upgradeCommand.getSuccess() && upgradeCommand.getRetryable());
    if (activeOrRetryableFailure) {
        // A running or retryable failed command may conflict with the requested runtime.
        return validateIfExistingRuntimeMatchesTargetRuntime(stack, connector, targetImage);
    }
    LOGGER.debug("There is no retryable upgradeCDH command, validation passed successfully");
    return new ClusterUpgradeExistingUpgradeCommandValidationFinishedEvent(stackId);
}
Usage of com.sequenceiq.cloudbreak.cluster.api.ClusterApi in the project cloudbreak by Hortonworks.
Example: the doSync method of the StackStatusCheckerJob class.
/**
 * Synchronizes the stored state of a single stack with the live cluster state.
 * When Cluster Manager is reachable, per-host health is fetched and reported;
 * otherwise (or on any runtime error) only a provider-side instance sync is done.
 *
 * @param stack the stack to synchronize
 */
private void doSync(Stack stack) {
    ClusterApi connector = clusterApiConnectors.getConnector(stack);
    Set<InstanceMetaData> runningInstances = instanceMetaDataService.findNotTerminatedAndNotZombieForStack(stack.getId());
    try {
        if (isClusterManagerRunning(stack, connector)) {
            ExtendedHostStatuses extendedHostStatuses = getExtendedHostStatuses(stack, connector);
            Map<HostName, Set<HealthCheck>> hostStatuses = extendedHostStatuses.getHostsHealth();
            // Typo fixed in the log message: "certicates" -> "certificates".
            LOGGER.debug("Cluster '{}' state check, host certificates expiring: [{}], cm running, hoststates: {}", stack.getId(), extendedHostStatuses.isAnyCertExpiring(), hostStatuses);
            reportHealthAndSyncInstances(stack, runningInstances, getFailedInstancesInstanceMetadata(extendedHostStatuses, runningInstances), getNewHealthyHostNames(extendedHostStatuses, runningInstances), extendedHostStatuses.isAnyCertExpiring());
        } else {
            // CM unreachable: fall back to syncing instance state from the provider only.
            syncInstances(stack, runningInstances, false);
        }
    } catch (RuntimeException e) {
        // Best-effort: a failed health check must not abort the periodic job,
        // so degrade to a plain instance sync.
        LOGGER.warn("Error during sync", e);
        syncInstances(stack, runningInstances, false);
    }
}
Usage of com.sequenceiq.cloudbreak.cluster.api.ClusterApi in the project cloudbreak by Hortonworks.
Example: the accept method of the DeregisterServicesHandler class.
/**
 * Handles {@code DeregisterServicesRequest} events: deregisters the stack's services
 * from Cluster Manager, passing along Data Lake connection details when the stack is
 * a Data Hub attached to a Data Lake. Always emits a {@code DeregisterServicesResult}
 * (carrying the exception on failure), so the flow never hangs on an error.
 *
 * @param event the deregistration request event
 */
@Override
public void accept(Event<DeregisterServicesRequest> event) {
    DeregisterServicesResult result;
    try {
        LOGGER.info("Received DeregisterServicesRequest event: {}", event.getData());
        Stack stack = stackService.getByIdWithListsInTransaction(event.getData().getResourceId());
        ClusterApi clusterApi = clusterApiConnectors.getConnector(stack);
        Optional<DatalakeDto> datalakeDto = Optional.empty();
        Optional<Stack> dataLakeOptional = datalakeService.getDatalakeStackByDatahubStack(stack);
        if (dataLakeOptional.isPresent()) {
            Stack dataLake = dataLakeOptional.get();
            HttpClientConfig httpClientConfig = tlsSecurityService.buildTLSClientConfigForPrimaryGateway(dataLake.getId(), dataLake.getClusterManagerIp(), dataLake.cloudPlatform());
            // Describe how to reach the Data Lake's Cluster Manager for the deregistration call.
            datalakeDto = Optional.ofNullable(DatalakeDto.DatalakeDtoBuilder.aDatalakeDto()
                    .withGatewayPort(dataLake.getGatewayPort())
                    .withHttpClientConfig(httpClientConfig)
                    .withPassword(dataLake.getCluster().getCloudbreakAmbariPassword())
                    .withUser(dataLake.getCluster().getCloudbreakAmbariUser())
                    .withName(dataLake.getName())
                    .build());
        }
        clusterApi.clusterSecurityService().deregisterServices(stack.getName(), datalakeDto);
        LOGGER.info("Finished disabling Security");
        result = new DeregisterServicesResult(event.getData());
    } catch (Exception e) {
        // Typo fixed in the log message: "occured" -> "occurred".
        LOGGER.warn("An error has occurred during disabling security", e);
        result = new DeregisterServicesResult(e.getMessage(), e, event.getData());
    }
    LOGGER.info("Sending out DeregisterServicesResult: {}", result);
    eventBus.notify(result.selector(), new Event<>(event.getHeaders(), result));
    LOGGER.info("DeregisterServicesResult has been sent");
}
Aggregations