Example usage of com.sequenceiq.it.cloudbreak.exception.TestFailException in the cloudbreak project by Hortonworks.
From the class ClouderaManagerClientActions, method checkCmServicesStartedSuccessfully.
/**
 * Verifies via the Cloudera Manager API that every required service is started on the
 * cluster's "master" hosts.
 *
 * @param testDto  the DistroX test DTO whose response carries the CM server IP and instance groups
 * @param user     CM API user
 * @param password CM API password
 * @return the unchanged {@code testDto} for fluent chaining
 * @throws TestFailException if any required service is not started, or the CM API call fails
 */
public DistroXTestDto checkCmServicesStartedSuccessfully(DistroXTestDto testDto, String user, String password) {
    String serverIp = testDto.getResponse().getCluster().getServerIp();
    ApiClient apiClient = getCmApiClientWithTimeoutDisabledDirect(serverIp, testDto.getName(), V_43, user, password);
    // CHECKSTYLE:OFF
    HostsResourceApi hostsResourceApi = new HostsResourceApi(apiClient);
    // CHECKSTYLE:ON
    try {
        // FQDNs of all instances in the "master" instance group.
        Set<String> masterHostnames = testDto.getResponse().getInstanceGroups().stream()
                .filter(ig -> "master".equals(ig.getName()))
                .flatMap(ig -> ig.getMetadata().stream())
                .map(InstanceMetaDataV4Response::getDiscoveryFQDN)
                .collect(Collectors.toSet());
        List<ApiHost> apiHosts = hostsResourceApi.readHosts(null, null, "FULL_WITH_HEALTH_CHECK_EXPLANATION").getItems();
        // Required services that are reported as not started on any master host.
        Set<String> servicesNotStarted = apiHosts.stream()
                .filter(apiHost -> masterHostnames.contains(apiHost.getHostname()))
                .flatMap(apiHost -> collectNotStartedServicesOnHost(apiHost).stream())
                .filter(REQUIRED_SERVICES::contains)
                .collect(Collectors.toSet());
        if (!servicesNotStarted.isEmpty()) {
            LOGGER.error("There are not started required services: {}", servicesNotStarted);
            throw new TestFailException(String.format("There are not started required services: %s", servicesNotStarted));
        }
    } catch (ApiException e) {
        LOGGER.error("Exception when calling HostsResourceApi#readHosts. Response: {}", e.getResponseBody(), e);
        String message = format("Exception when calling HostsResourceApi#readHosts at %s. Response: %s", apiClient.getBasePath(), e.getResponseBody());
        throw new TestFailException(message, e);
    } catch (Exception e) {
        // Pass the throwable to the logger so the stack trace is not lost (the original dropped it).
        LOGGER.error("Can't read host statuses at: '{}'!", apiClient.getBasePath(), e);
        throw new TestFailException("Can't read host statuses at: " + apiClient.getBasePath(), e);
    }
    return testDto;
}
Example usage of com.sequenceiq.it.cloudbreak.exception.TestFailException in the cloudbreak project by Hortonworks.
From the class ClouderaManagerClientActions, method checkCmHdfsDatanodeRoleConfigGroups.
/**
 * Checks the HDFS DataNode role config group and fails if the {@code dfs_data_dir_list}
 * config maps onto any of the given (ephemeral) mount points, or if no DataNode config exists.
 *
 * @param testDto     the DistroX test DTO whose response carries the CM server IP
 * @param user        CM API user
 * @param password    CM API password
 * @param mountPoints ephemeral mount-point prefixes that must NOT back the DataNode data dirs
 * @return the unchanged {@code testDto} for fluent chaining
 * @throws TestFailException if an ephemeral mapping is found, no config exists, or the CM API call fails
 */
public DistroXTestDto checkCmHdfsDatanodeRoleConfigGroups(DistroXTestDto testDto, String user, String password, Set<String> mountPoints) {
    String serverIp = testDto.getResponse().getCluster().getServerIp();
    ApiClient apiClient = getCmApiClientWithTimeoutDisabled(serverIp, testDto.getName(), V_43, user, password);
    // CHECKSTYLE:OFF
    RoleConfigGroupsResourceApi roleConfigGroupsResourceApi = new RoleConfigGroupsResourceApi(apiClient);
    // CHECKSTYLE:ON
    try {
        ApiConfigList hdfsConfigs = roleConfigGroupsResourceApi.readConfig(testDto.getName(), "hdfs-DATANODE-BASE", "hdfs", "summary");
        // Fail fast when no config came back at all (the original only checked this after iterating).
        if (hdfsConfigs.getItems().isEmpty()) {
            LOGGER.error("Datanode mappings do NOT exist!");
            throw new TestFailException("Datanode mappings do NOT exist!");
        }
        hdfsConfigs.getItems().forEach(config -> {
            String hdfsConfigName = config.getName();
            String mappingsFromHdfsConfig = config.getValue();
            if ("dfs_data_dir_list".equalsIgnoreCase(hdfsConfigName)) {
                // The data dir list must not start with any of the ephemeral mount points.
                if (mountPoints.stream().anyMatch(mappingsFromHdfsConfig::startsWith)) {
                    LOGGER.error("{} contains ephemeral volume mapping '{}'!", hdfsConfigName, mappingsFromHdfsConfig);
                    throw new TestFailException(String.format("%s contains ephemeral volume mapping '%s'!", hdfsConfigName, mappingsFromHdfsConfig));
                } else {
                    Log.log(LOGGER, format(" '%s' does not contain the ephemeral mapping '%s', as expected. ", hdfsConfigName, mappingsFromHdfsConfig));
                }
            }
        });
    } catch (ApiException e) {
        LOGGER.error("Exception when calling RoleConfigGroupsResourceApi#readConfig. Response: {}", e.getResponseBody(), e);
        String message = format("Exception when calling RoleConfigGroupsResourceApi#readConfig at %s. Response: %s", apiClient.getBasePath(), e.getResponseBody());
        throw new TestFailException(message, e);
    } catch (Exception e) {
        // Pass the throwable to the logger so the stack trace is not lost (the original dropped it).
        LOGGER.error("Can't read config at: '{}'!", apiClient.getBasePath(), e);
        throw new TestFailException("Can't read config at: " + apiClient.getBasePath(), e);
    }
    return testDto;
}
Example usage of com.sequenceiq.it.cloudbreak.exception.TestFailException in the cloudbreak project by Hortonworks.
From the class GcpClientActions, method deleteHostGroupInstances.
/**
 * Deletes the given GCP instances in the configured availability zone and waits for each
 * delete operation to complete. An instance that is already gone (HTTP 404) is treated as
 * successfully deleted.
 *
 * @param instanceIds GCP instance ids to delete
 * @throws TestFailException if a delete operation fails or does not finish in state DONE
 */
public void deleteHostGroupInstances(List<String> instanceIds) {
    LOGGER.info("Deleting instances: '{}'", String.join(", ", instanceIds));
    Compute compute = buildCompute();
    for (String instanceId : instanceIds) {
        try {
            Operation deleteOperationResponse = compute.instances()
                    .delete(getProjectId(), gcpProperties.getAvailabilityZone(), instanceId)
                    .execute();
            Log.log(LOGGER, format(" Gcp instance [%s] state is [%s] with message: %s", instanceId,
                    deleteOperationResponse.getStatus(), deleteOperationResponse.getStatusMessage()));
            try {
                waitForComplete(compute, deleteOperationResponse, getProjectId(), TIMEOUT);
            } catch (Exception e) {
                String defaultErrorMessageForInstanceDeletion = getDefaultErrorMessageForInstanceDeletion(instanceId, deleteOperationResponse);
                LOGGER.error(defaultErrorMessageForInstanceDeletion, e);
                throw new TestFailException(defaultErrorMessageForInstanceDeletion, e);
            }
            // Constant-first equals: getStatus() may be null, original getStatus().equals("DONE") could NPE.
            if ("DONE".equals(deleteOperationResponse.getStatus())) {
                Log.log(LOGGER, format(" Gcp Instance: %s state is DELETED ", instanceId));
            } else {
                String defaultErrorMessageForInstanceDeletion = getDefaultErrorMessageForInstanceDeletion(instanceId, deleteOperationResponse);
                LOGGER.error(defaultErrorMessageForInstanceDeletion);
                throw new TestFailException(defaultErrorMessageForInstanceDeletion);
            }
        } catch (GoogleJsonResponseException e) {
            // Prefer the HTTP status code over fragile message-text matching to detect "already deleted".
            if (e.getStatusCode() == 404 || e.getMessage().contains("Not Found")) {
                LOGGER.info(String.format("Gcp instance [%s] is not found, thus it is deleted.", instanceId));
            } else {
                handleGeneralInstanceDeletionError(instanceId, e);
            }
        } catch (IOException e) {
            handleGeneralInstanceDeletionError(instanceId, e);
        }
    }
}
Example usage of com.sequenceiq.it.cloudbreak.exception.TestFailException in the cloudbreak project by Hortonworks.
From the class GcpClientActions, method stopHostGroupInstances.
/**
 * Stops the given GCP instances in the configured availability zone and waits for each
 * stop operation to complete.
 *
 * @param instanceIds GCP instance ids to stop
 * @throws TestFailException if a stop operation does not finish in state DONE
 */
public void stopHostGroupInstances(List<String> instanceIds) {
    LOGGER.info("Stopping instances: '{}'", String.join(", ", instanceIds));
    Compute compute = buildCompute();
    for (String instanceId : instanceIds) {
        try {
            Operation stopInstanceResponse = compute.instances()
                    .stop(getProjectId(), gcpProperties.getAvailabilityZone(), instanceId)
                    .execute();
            Log.log(LOGGER, format(" Gcp instance [%s] state is [%s] with message: %s", instanceId,
                    stopInstanceResponse.getStatus(), stopInstanceResponse.getStatusMessage()));
            waitForComplete(compute, stopInstanceResponse, getProjectId(), TIMEOUT);
            // Constant-first equals: getStatus() may be null.
            if ("DONE".equals(stopInstanceResponse.getStatus())) {
                Log.log(LOGGER, format(" Gcp Instance: %s state is STOPPED ", instanceId));
            } else {
                LOGGER.error("Gcp Instance: {} stop has not been successful. So the actual state is: {} with message: {}", instanceId, stopInstanceResponse.getStatus(), stopInstanceResponse.getStatusMessage());
                throw new TestFailException(" Gcp Instance: " + instanceId + " stop has not been successful, because of the actual state is: " + stopInstanceResponse.getStatus() + " with message: " + stopInstanceResponse.getStatusMessage());
            }
        } catch (TestFailException e) {
            // Bug fix: the original's generic catch swallowed this failure and downgraded it to a
            // warning, so a failed stop could never fail the test. Rethrow it instead.
            throw e;
        } catch (Exception e) {
            LOGGER.warn(String.format("Failed to get the details of the instance from Gcp with instance id: '%s'", instanceId), e);
        }
    }
}
Example usage of com.sequenceiq.it.cloudbreak.exception.TestFailException in the cloudbreak project by Hortonworks.
From the class GcpClientActions, method deleteNonVersionedBucket.
/**
 * Removes the non-versioned GCS bucket identified by the given base location.
 *
 * @param baseLocation the bucket (base location) to remove
 * @throws TestFailException if the GCS delete call fails
 */
public void deleteNonVersionedBucket(String baseLocation) {
    LOGGER.info("Delete bucket from base location: '{}'", baseLocation);
    Storage storage = buildStorage();
    try {
        // Build and execute the bucket-delete request in one step.
        storage.buckets().delete(baseLocation).execute();
    } catch (IOException ioException) {
        String msg = String.format("Failed to delete bucket from base location '%s'", baseLocation);
        LOGGER.error(msg, ioException);
        throw new TestFailException(msg, ioException);
    }
}
Aggregations