Use of com.sequenceiq.cloudbreak.service.CloudbreakException in project cloudbreak by hortonworks.
The class AmbariClusterSecurityServiceTest, method testDisableSecurityWhenCancellationExceptionOccursThenShouldThrowCancellationException.
@Test
public void testDisableSecurityWhenCancellationExceptionOccursThenShouldThrowCancellationException() throws CloudbreakException {
    Stack stack = TestUtil.stack();
    Cluster cluster = TestUtil.cluster();
    stack.setCluster(cluster);
    AmbariClient ambariClient = Mockito.mock(AmbariClient.class);
    when(clientFactory.getAmbariClient(stack, stack.getCluster())).thenReturn(ambariClient);
    when(ambariClient.disableKerberos()).thenReturn(1);
    Map<String, Integer> operationRequests = singletonMap("DISABLE_KERBEROS_REQUEST", 1);
    ImmutablePair<PollingResult, Exception> pair = new ImmutablePair<>(PollingResult.EXIT, null);
    String failed = "failed";
    when(ambariOperationService.waitForOperations(stack, ambariClient, operationRequests, DISABLE_KERBEROS_STATE)).thenThrow(new CancellationException("cancel"));
    when(cloudbreakMessagesService.getMessage(AMBARI_CLUSTER_DISABLE_KERBEROS_FAILED.code())).thenReturn("failed");
    doThrow(new CancellationException("cancel")).when(ambariClusterConnectorPollingResultChecker).checkPollingResult(pair.getLeft(), failed);
    thrown.expect(CancellationException.class);
    thrown.expectMessage("cancel");
    underTest.disableSecurity(stack);
    verify(ambariOperationService, times(1)).waitForOperations(stack, ambariClient, operationRequests, DISABLE_KERBEROS_STATE);
    verify(cloudbreakMessagesService, times(1)).getMessage(AMBARI_CLUSTER_DISABLE_KERBEROS_FAILED.code());
    verify(ambariClusterConnectorPollingResultChecker, times(1)).checkPollingResult(pair.getLeft(), failed);
}
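The stubbing and verification above rely on class-level test scaffolding that this excerpt omits. A minimal sketch of that setup, assuming a plain Mockito JUnit 4 runner; the field types are inferred from the field names and may differ from the real test class.

@RunWith(MockitoJUnitRunner.class)
public class AmbariClusterSecurityServiceTest {

    // referenced as "thrown" by the expect/expectMessage calls in the test methods
    @Rule
    public final ExpectedException thrown = ExpectedException.none();

    // collaborators stubbed and verified above; the concrete types are assumptions
    @Mock
    private AmbariClientFactory clientFactory;

    @Mock
    private AmbariOperationService ambariOperationService;

    @Mock
    private CloudbreakMessagesService cloudbreakMessagesService;

    @Mock
    private AmbariClusterConnectorPollingResultChecker ambariClusterConnectorPollingResultChecker;

    // the service under test, referenced as "underTest"
    @InjectMocks
    private AmbariClusterSecurityService underTest;
}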
Use of com.sequenceiq.cloudbreak.service.CloudbreakException in project cloudbreak by hortonworks.
The class AmbariClusterSecurityServiceTest, method testPrepareSecurityWhenCancellationExceptionOccursThenShouldThrowCancellationException.
@Test
public void testPrepareSecurityWhenCancellationExceptionOccursThenShouldThrowCancellationException() throws CloudbreakException {
    Stack stack = TestUtil.stack();
    Cluster cluster = TestUtil.cluster();
    stack.setCluster(cluster);
    AmbariClient ambariClient = Mockito.mock(AmbariClient.class);
    when(clientFactory.getAmbariClient(stack, stack.getCluster())).thenReturn(ambariClient);
    when(ambariClient.startService(anyString())).thenReturn(1);
    when(ambariClient.stopService(anyString())).thenReturn(1);
    Map<String, Integer> operationRequests = new HashMap<>();
    operationRequests.put("ZOOKEEPER_SERVICE_STATE", 1);
    operationRequests.put("HDFS_SERVICE_STATE", 1);
    operationRequests.put("YARN_SERVICE_STATE", 1);
    operationRequests.put("MAPREDUCE2_SERVICE_STATE", 1);
    operationRequests.put("KERBEROS_SERVICE_STATE", 1);
    ImmutablePair<PollingResult, Exception> pair = new ImmutablePair<>(PollingResult.EXIT, null);
    String failed = "failed";
    when(ambariOperationService.waitForOperations(stack, ambariClient, operationRequests, PREPARE_DEKERBERIZING)).thenThrow(new CancellationException("cancel"));
    when(cloudbreakMessagesService.getMessage(AMBARI_CLUSTER_PREPARE_DEKERBERIZING_FAILED.code())).thenReturn("failed");
    doThrow(new CancellationException("cancel")).when(ambariClusterConnectorPollingResultChecker).checkPollingResult(pair.getLeft(), failed);
    thrown.expect(CancellationException.class);
    thrown.expectMessage("cancel");
    underTest.prepareSecurity(stack);
    verify(ambariOperationService, times(1)).waitForOperations(stack, ambariClient, operationRequests, PREPARE_DEKERBERIZING);
    verify(cloudbreakMessagesService, times(1)).getMessage(AMBARI_CLUSTER_PREPARE_DEKERBERIZING_FAILED.code());
    verify(ambariClusterConnectorPollingResultChecker, times(1)).checkPollingResult(pair.getLeft(), failed);
}
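Note that waitForOperations is stubbed to throw before underTest.prepareSecurity(stack) returns, so once the ExpectedException rule intercepts the CancellationException, the verify calls at the end of the test never execute. A sketch of an equivalent formulation that keeps the verification reachable; this is an illustration, not the project's code, and assumes JUnit's fail and assertEquals are statically imported.

try {
    underTest.prepareSecurity(stack);
    fail("Expected a CancellationException");
} catch (CancellationException e) {
    // the message propagated from the stubbed waitForOperations call
    assertEquals("cancel", e.getMessage());
}
// only this interaction is guaranteed to have happened before the exception was thrown
verify(ambariOperationService, times(1)).waitForOperations(stack, ambariClient, operationRequests, PREPARE_DEKERBERIZING);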
Use of com.sequenceiq.cloudbreak.service.CloudbreakException in project cloudbreak by hortonworks.
The class AmbariClusterSecurityServiceTest, method testDisableSecurityWhenEverythingWorks.
@Test
public void testDisableSecurityWhenEverythingWorks() throws CloudbreakException {
    Stack stack = TestUtil.stack();
    Cluster cluster = TestUtil.cluster();
    stack.setCluster(cluster);
    AmbariClient ambariClient = Mockito.mock(AmbariClient.class);
    when(clientFactory.getAmbariClient(stack, stack.getCluster())).thenReturn(ambariClient);
    when(ambariClient.disableKerberos()).thenReturn(1);
    Map<String, Integer> operationRequests = singletonMap("DISABLE_KERBEROS_REQUEST", 1);
    ImmutablePair<PollingResult, Exception> pair = new ImmutablePair<>(PollingResult.SUCCESS, null);
    String failed = "failed";
    when(ambariOperationService.waitForOperations(stack, ambariClient, operationRequests, DISABLE_KERBEROS_STATE)).thenReturn(pair);
    when(cloudbreakMessagesService.getMessage(AMBARI_CLUSTER_DISABLE_KERBEROS_FAILED.code())).thenReturn("failed");
    doNothing().when(ambariClusterConnectorPollingResultChecker).checkPollingResult(pair.getLeft(), failed);
    underTest.disableSecurity(stack);
    verify(ambariOperationService, times(1)).waitForOperations(stack, ambariClient, operationRequests, DISABLE_KERBEROS_STATE);
    verify(cloudbreakMessagesService, times(1)).getMessage(AMBARI_CLUSTER_DISABLE_KERBEROS_FAILED.code());
    verify(ambariClusterConnectorPollingResultChecker, times(1)).checkPollingResult(pair.getLeft(), failed);
}
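Taken together, these tests outline the flow that disableSecurity itself is expected to follow. A sketch of that flow reconstructed from the stubs and verifications above; it is an approximation, not the actual AmbariClusterSecurityService source.

// Reconstructed from the test expectations; the real implementation may differ.
public void disableSecurity(Stack stack) throws CloudbreakException {
    AmbariClient ambariClient = clientFactory.getAmbariClient(stack, stack.getCluster());
    int requestId = ambariClient.disableKerberos();
    Map<String, Integer> operationRequests = singletonMap("DISABLE_KERBEROS_REQUEST", requestId);
    ImmutablePair<PollingResult, Exception> pair =
            ambariOperationService.waitForOperations(stack, ambariClient, operationRequests, DISABLE_KERBEROS_STATE);
    String failedMessage = cloudbreakMessagesService.getMessage(AMBARI_CLUSTER_DISABLE_KERBEROS_FAILED.code());
    ambariClusterConnectorPollingResultChecker.checkPollingResult(pair.getLeft(), failedMessage);
}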
Use of com.sequenceiq.cloudbreak.service.CloudbreakException in project cloudbreak by hortonworks.
The class ClusterBootstrapper, method bootstrapNewNodes.
public void bootstrapNewNodes(Long stackId, Set<String> upscaleCandidateAddresses, Collection<String> recoveryHostNames) throws CloudbreakException {
    Stack stack = stackRepository.findOneWithLists(stackId);
    Set<Node> nodes = new HashSet<>();
    Set<Node> allNodes = new HashSet<>();
    boolean recoveredNodes = Integer.valueOf(recoveryHostNames.size()).equals(upscaleCandidateAddresses.size());
    Set<InstanceMetaData> metaDataSet = stack.getRunningInstanceMetaData().stream()
            .filter(im -> im.getPrivateIp() != null && im.getPublicIpWrapper() != null)
            .collect(Collectors.toSet());
    String clusterDomain = metaDataSet.stream().filter(im -> isNoneBlank(im.getDiscoveryFQDN())).findAny().get().getDomain();
    for (InstanceMetaData im : metaDataSet) {
        Node node = createNode(stack.getCustomHostname(), im, clusterDomain, stack.isHostgroupNameAsHostname());
        if (upscaleCandidateAddresses.contains(im.getPrivateIp())) {
            // reuse the recovered hostname, but only when we would have generated a hostname; otherwise use the cloud provider's default mechanism
            if (recoveredNodes && isNoneBlank(node.getHostname())) {
                Iterator<String> iterator = recoveryHostNames.iterator();
                node.setHostname(iterator.next().split("\\.")[0]);
                iterator.remove();
                LOGGER.info("Set the hostname to {} for address: {}", node.getHostname(), im.getPrivateIp());
            }
            nodes.add(node);
        }
        allNodes.add(node);
    }
    try {
        String stackOrchestratorType = stack.getOrchestrator().getType();
        OrchestratorType orchestratorType = orchestratorTypeResolver.resolveType(stack.getOrchestrator().getType());
        if (orchestratorType.hostOrchestrator()) {
            List<GatewayConfig> allGatewayConfigs = gatewayConfigService.getAllGatewayConfigs(stack);
            bootstrapNewNodesOnHost(stack, allGatewayConfigs, nodes, allNodes);
        } else if (orchestratorType.containerOrchestrator()) {
            LOGGER.info("Skipping bootstrap of the new machines because the stack's orchestrator type is '{}'.", stackOrchestratorType);
        } else {
            LOGGER.error("Orchestrator not found: {}", stackOrchestratorType);
            throw new CloudbreakException("HostOrchestrator not found: " + stackOrchestratorType);
        }
    } catch (CloudbreakOrchestratorCancelledException e) {
        throw new CancellationException(e.getMessage());
    } catch (CloudbreakOrchestratorException e) {
        throw new CloudbreakException(e);
    }
}
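In the catch blocks above, a cancelled orchestration is rethrown as a CancellationException while any other orchestrator failure is wrapped in a CloudbreakException. A hypothetical caller-side sketch of how the two are typically told apart; the names and handling are illustrative only, not taken from the project.

try {
    clusterBootstrapper.bootstrapNewNodes(stackId, upscaleCandidateAddresses, recoveryHostNames);
} catch (CancellationException e) {
    // cancellation requested while bootstrapping; stop quietly instead of failing the stack
    LOGGER.info("Bootstrap of new nodes was cancelled: {}", e.getMessage());
} catch (CloudbreakException e) {
    // genuine bootstrap failure; hand it over to the failure-handling path
    LOGGER.error("Bootstrap of new nodes failed", e);
    throw e;
}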
Use of com.sequenceiq.cloudbreak.service.CloudbreakException in project cloudbreak by hortonworks.
The class ClusterBootstrapper, method bootstrapOnHost.
@SuppressFBWarnings("REC_CATCH_EXCEPTION")
@SuppressWarnings("unchecked")
public void bootstrapOnHost(Stack stack) throws CloudbreakException {
    Set<Node> nodes = new HashSet<>();
    String domain = hostDiscoveryService.determineDomain(stack.getCustomDomain(), stack.getName(), stack.isClusterNameAsSubdomain());
    for (InstanceMetaData im : stack.getRunningInstanceMetaData()) {
        if (im.getPrivateIp() == null && im.getPublicIpWrapper() == null) {
            LOGGER.warn("Skipping instance metadata because the public ip and private ips are null '{}'.", im);
        } else {
            String generatedHostName = hostDiscoveryService.generateHostname(stack.getCustomHostname(), im.getInstanceGroupName(), im.getPrivateId(), stack.isHostgroupNameAsHostname());
            nodes.add(new Node(im.getPrivateIp(), im.getPublicIpWrapper(), generatedHostName, domain, im.getInstanceGroupName()));
        }
    }
    try {
        HostOrchestrator hostOrchestrator = hostOrchestratorResolver.get(stack.getOrchestrator().getType());
        List<GatewayConfig> allGatewayConfig = new ArrayList<>();
        Boolean enableKnox = stack.getCluster().getGateway().getEnableGateway();
        for (InstanceMetaData gateway : stack.getGatewayInstanceMetadata()) {
            GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, gateway, enableKnox);
            allGatewayConfig.add(gatewayConfig);
            PollingResult bootstrapApiPolling = hostBootstrapApiPollingService.pollWithTimeoutSingleFailure(hostBootstrapApiCheckerTask,
                    new HostBootstrapApiContext(stack, gatewayConfig, hostOrchestrator), POLL_INTERVAL, MAX_POLLING_ATTEMPTS);
            validatePollingResultForCancellation(bootstrapApiPolling, "Polling of bootstrap API was cancelled.");
        }
        ClusterComponent saltComponent = clusterComponentProvider.getComponent(stack.getCluster().getId(), ComponentType.SALT_STATE);
        if (saltComponent == null) {
            byte[] stateConfigZip = hostOrchestrator.getStateConfigZip();
            saltComponent = new ClusterComponent(ComponentType.SALT_STATE,
                    new Json(singletonMap(ComponentType.SALT_STATE.name(), Base64.encodeBase64String(stateConfigZip))), stack.getCluster());
            clusterComponentProvider.store(saltComponent);
        }
        hostOrchestrator.bootstrap(allGatewayConfig, nodes, clusterDeletionBasedModel(stack.getId(), null));
        InstanceMetaData primaryGateway = stack.getPrimaryGatewayInstance();
        GatewayConfig gatewayConfig = gatewayConfigService.getGatewayConfig(stack, primaryGateway, enableKnox);
        String gatewayIp = gatewayConfigService.getGatewayIp(stack, primaryGateway);
        PollingResult allNodesAvailabilityPolling = hostClusterAvailabilityPollingService.pollWithTimeoutSingleFailure(hostClusterAvailabilityCheckerTask,
                new HostOrchestratorClusterContext(stack, hostOrchestrator, gatewayConfig, nodes), POLL_INTERVAL, MAX_POLLING_ATTEMPTS);
        validatePollingResultForCancellation(allNodesAvailabilityPolling, "Polling of all nodes availability was cancelled.");
        Orchestrator orchestrator = stack.getOrchestrator();
        orchestrator.setApiEndpoint(gatewayIp + ':' + stack.getGatewayPort());
        orchestrator.setType(hostOrchestrator.name());
        orchestratorRepository.save(orchestrator);
        if (TIMEOUT.equals(allNodesAvailabilityPolling)) {
            clusterBootstrapperErrorHandler.terminateFailedNodes(hostOrchestrator, null, stack, gatewayConfig, nodes);
        }
    } catch (Exception e) {
        throw new CloudbreakException(e);
    }
}
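validatePollingResultForCancellation is called twice above but is not part of this excerpt. A minimal sketch of what it presumably does, assuming that an EXIT polling result signals a cancel request (an assumption based on the method name and on how EXIT is used in the test excerpts earlier in this section), not the project's verbatim code.

private void validatePollingResultForCancellation(PollingResult pollingResult, String cancelledMessage) {
    // EXIT is taken here to mean that polling stopped because the flow was cancelled
    if (PollingResult.EXIT.equals(pollingResult)) {
        throw new CancellationException(cancelledMessage);
    }
}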