Use of com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint in project cloudbreak by hortonworks.
The class ManualRecoveryTest, method testManualRecovery.
@Test
@Parameters({ "hostGroup", "removeOnly", "removedInstanceCount" })
public void testManualRecovery(String hostGroup, @Optional("False") Boolean removeOnly, @Optional("0") Integer removedInstanceCount) {
    // GIVEN
    if (removeOnly) {
        Assert.assertNotEquals(removedInstanceCount, 0);
    }
    IntegrationTestContext itContext = getItContext();
    String stackId = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
    String ambariUser = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_USER_ID);
    String ambariPassword = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PASSWORD_ID);
    String ambariPort = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PORT_ID);
    Map<String, String> cloudProviderParams = itContext.getContextParam(CloudbreakITContextConstants.CLOUDPROVIDER_PARAMETERS, Map.class);
    StackV1Endpoint stackV1Endpoint = getCloudbreakClient().stackV1Endpoint();
    StackResponse stackResponse = stackV1Endpoint.get(Long.valueOf(stackId), new HashSet<>());
    String instanceToDelete = RecoveryUtil.getInstanceId(stackResponse, hostGroup);
    Assert.assertNotNull(instanceToDelete);
    RecoveryUtil.deleteInstance(cloudProviderParams, instanceToDelete);
    Integer expectedNodeCountAmbari = ScalingUtil.getNodeCountAmbari(stackV1Endpoint, ambariPort, stackId, ambariUser, ambariPassword, itContext)
            - removedInstanceCount;
    WaitResult waitResult = CloudbreakUtil.waitForHostStatusStack(stackV1Endpoint, stackId, hostGroup, "UNHEALTHY");
    if (waitResult == WaitResult.TIMEOUT) {
        Assert.fail("Timeout happened when waiting for the desired host state");
    }
    // WHEN
    List<String> hostgroupList = Arrays.asList(hostGroup.split(","));
    ClusterRepairRequest clusterRepairRequest = new ClusterRepairRequest();
    clusterRepairRequest.setHostGroups(hostgroupList);
    clusterRepairRequest.setRemoveOnly(removeOnly);
    getCloudbreakClient().clusterEndpoint().repairCluster(Long.valueOf(stackId), clusterRepairRequest);
    // THEN
    Map<String, String> desiredStatuses = new HashMap<>();
    desiredStatuses.put("status", "AVAILABLE");
    desiredStatuses.put("clusterStatus", "AVAILABLE");
    CloudbreakUtil.waitAndCheckStatuses(getCloudbreakClient(), stackId, desiredStatuses);
    Integer actualNodeCountAmbari = ScalingUtil.getNodeCountAmbari(stackV1Endpoint, ambariPort, stackId, ambariUser, ambariPassword, itContext);
    Assert.assertEquals(expectedNodeCountAmbari, actualNodeCountAmbari);
}
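The repair flow above relies on RecoveryUtil.getInstanceId to resolve a host group name into a cloud instance id that can be deleted. A minimal, hypothetical sketch of such a lookup is shown below; the getInstanceGroups(), getGroup(), getMetadata() and getInstanceId() accessors are assumptions about the v1 StackResponse model, not the project's verified RecoveryUtil code.

// Hypothetical sketch: resolve the instance id of the first node in a given host group.
// The accessors used on StackResponse are assumptions about the v1 response model.
public static String findInstanceId(StackResponse stackResponse, String hostGroup) {
    return stackResponse.getInstanceGroups().stream()
            .filter(group -> hostGroup.equals(group.getGroup()))
            .flatMap(group -> group.getMetadata().stream())
            .map(metadata -> metadata.getInstanceId())
            .findFirst()
            .orElse(null);
}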
Use of com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint in project cloudbreak by hortonworks.
The class ScalingUtil, method putInstanceCountToContext.
public static void putInstanceCountToContext(IntegrationTestContext itContext, String stackId) {
    Collection<Map<String, Integer>> tmpInstanceCount = new ArrayList<>();
    StackV1Endpoint stackV1Endpoint = itContext.getContextParam(CloudbreakITContextConstants.CLOUDBREAK_CLIENT, CloudbreakClient.class).stackV1Endpoint();
    StackResponse stackResponse = stackV1Endpoint.get(Long.valueOf(stackId), new HashSet<>());
    // Reuse the list already stored in the context, if any, then append the current counts.
    if (itContext.getContextParam(CloudbreakITContextConstants.INSTANCE_COUNT, List.class) != null) {
        tmpInstanceCount = itContext.getContextParam(CloudbreakITContextConstants.INSTANCE_COUNT, List.class);
    }
    tmpInstanceCount.add(getNodeCountByHostgroup(stackResponse));
    itContext.putContextParam(CloudbreakITContextConstants.INSTANCE_COUNT, tmpInstanceCount);
}
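The helper appends one host-group-to-node-count map per call, so the context keeps a history of counts across scaling steps. The getNodeCountByHostgroup helper it relies on is not shown on this page; a hedged sketch follows, assuming the v1 StackResponse exposes getInstanceGroups() with getGroup() and getNodeCount().

// Hypothetical sketch of the getNodeCountByHostgroup helper referenced above.
// The getInstanceGroups()/getGroup()/getNodeCount() accessors are assumptions about the v1 model.
private static Map<String, Integer> getNodeCountByHostgroup(StackResponse stackResponse) {
    Map<String, Integer> nodeCountByHostgroup = new HashMap<>();
    stackResponse.getInstanceGroups()
            .forEach(group -> nodeCountByHostgroup.put(group.getGroup(), group.getNodeCount()));
    return nodeCountByHostgroup;
}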
Use of com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint in project cloudbreak by hortonworks.
The class AutoScalingPrometheusTest, method testAutoscaling.
@Test
@Parameters({ "cooldown", "clusterMinSize", "clusterMaxSize", "policyName", "operator", "alertRuleName", "period", "threshold", "hostGroup", "scalingAdjustment" })
public void testAutoscaling(int cooldown, int clusterMinSize, int clusterMaxSize, String policyName, String operator, String alertRuleName,
        int period, Double threshold, String hostGroup, int scalingAdjustment) {
    // GIVEN
    itContext = getItContext();
    String stackId = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
    StackV1Endpoint stackV1Endpoint = itContext.getContextParam(CloudbreakITContextConstants.CLOUDBREAK_CLIENT, CloudbreakClient.class).stackV1Endpoint();
    String ambariUser = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_USER_ID);
    String ambariPassword = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PASSWORD_ID);
    String ambariPort = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PORT_ID);
    autoscaleClient = itContext.getContextParam(CloudbreakITContextConstants.AUTOSCALE_CLIENT, AutoscaleClient.class);
    Long clusterId = AutoscalingUtil.getPeriscopeClusterId(autoscaleClient, stackId);
    long currentTime = RecoveryUtil.getCurentTimeStamp();
    int expectedNodeCountStack = ScalingUtil.getNodeCountStack(stackV1Endpoint, stackId) + scalingAdjustment;
    int expectedNodeCountCluster = ScalingUtil.getNodeCountAmbari(stackV1Endpoint, ambariPort, stackId, ambariUser, ambariPassword, itContext) + scalingAdjustment;
    // WHEN
    AutoscalingUtil.configureAutoScaling(autoscaleClient, clusterId, cooldown, clusterMinSize, clusterMaxSize);
    AutoscalingUtil.switchAutoscaling(autoscaleClient, clusterId, true);
    AutoscalingUtil.createPrometheusAlert(autoscaleClient, clusterId, policyName, operator, alertRuleName, period, threshold);
    Long alertId = AutoscalingUtil.getAlertId(autoscaleClient, clusterId, policyName);
    AutoscalingUtil.setAlertsToContext(itContext, clusterId, alertId);
    AutoscalingUtil.createPolicy(autoscaleClient, policyName, clusterId, alertId, hostGroup, scalingAdjustment);
    // THEN
    AutoscalingUtil.checkHistory(autoscaleClient, clusterId, currentTime);
    AutoscalingUtil.checkScaling(itContext, getCloudbreakClient(), scalingAdjustment, stackId, expectedNodeCountStack, expectedNodeCountCluster);
}
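The expected node counts are computed up front from ScalingUtil.getNodeCountStack (Cloudbreak's view of the stack) and ScalingUtil.getNodeCountAmbari (Ambari's view of the cluster), then compared after the Prometheus alert triggers scaling. A minimal sketch of the stack-side counter is shown below; it is an illustration under the same assumed StackResponse accessors as above, not the project's verified ScalingUtil code.

// Hypothetical sketch: total node count of a stack, summed across its instance groups.
// getInstanceGroups()/getNodeCount() are assumed accessors of the v1 StackResponse model.
public static int getNodeCountStack(StackV1Endpoint stackV1Endpoint, String stackId) {
    StackResponse stackResponse = stackV1Endpoint.get(Long.valueOf(stackId), new HashSet<>());
    return stackResponse.getInstanceGroups().stream()
            .mapToInt(group -> group.getNodeCount())
            .sum();
}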
Use of com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint in project cloudbreak by hortonworks.
The class AutoscalingUtil, method checkScaling.
static void checkScaling(IntegrationTestContext itContext, CloudbreakClient cloudbreakClient, int scalingAdjustment, String stackId,
        int expectedNodeCountStack, int expectedNodeCountCluster) {
    String ambariUser = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_USER_ID);
    String ambariPassword = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PASSWORD_ID);
    String ambariPort = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PORT_ID);
    StackV1Endpoint stackV1Endpoint = itContext.getContextParam(CloudbreakITContextConstants.CLOUDBREAK_CLIENT, CloudbreakClient.class).stackV1Endpoint();
    if (scalingAdjustment < 0) {
        CloudbreakUtil.waitAndCheckClusterStatus(cloudbreakClient, stackId, "AVAILABLE");
        CloudbreakUtil.waitAndCheckStackStatus(cloudbreakClient, stackId, "AVAILABLE");
    } else {
        CloudbreakUtil.waitAndCheckStackStatus(cloudbreakClient, stackId, "AVAILABLE");
        CloudbreakUtil.waitAndCheckClusterStatus(cloudbreakClient, stackId, "AVAILABLE");
    }
    ScalingUtil.checkStackScaled(stackV1Endpoint, stackId, expectedNodeCountStack);
    ScalingUtil.checkClusterScaled(stackV1Endpoint, ambariPort, stackId, ambariUser, ambariPassword, expectedNodeCountCluster, itContext);
    ScalingUtil.putInstanceCountToContext(itContext, stackId);
}
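The final assertions are delegated to ScalingUtil.checkStackScaled and checkClusterScaled. A hedged sketch of the stack-side check follows; it reuses the node-counting approach sketched above and the TestNG assertion style of the other snippets, and is an assumption rather than the project's verified implementation.

// Hypothetical sketch of a stack-side scaling assertion built on the assumed node-count accessors.
public static void checkStackScaled(StackV1Endpoint stackV1Endpoint, String stackId, int expectedNodeCount) {
    StackResponse stackResponse = stackV1Endpoint.get(Long.valueOf(stackId), new HashSet<>());
    int actualNodeCount = stackResponse.getInstanceGroups().stream()
            .mapToInt(group -> group.getNodeCount())
            .sum();
    Assert.assertEquals(actualNodeCount, expectedNodeCount,
            "The stack node count after scaling does not match the expected count!");
}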
Use of com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint in project cloudbreak by hortonworks.
The class CloudbreakUtil, method getFailedStatusReason.
public static String getFailedStatusReason(CloudbreakClient cloudbreakClient, String stackId, Map<String, String> desiredStatuses,
        Collection<WaitResult> desiredWaitResult) {
    for (int i = 0; i < 3; i++) {
        WaitResult waitResult = waitForStatuses(cloudbreakClient, stackId, desiredStatuses);
        if (!desiredWaitResult.contains(waitResult)) {
            Assert.fail("Expected one of the failed wait results " + desiredWaitResult + ", actual: " + waitResult);
        } else {
            StackV1Endpoint stackV1Endpoint = cloudbreakClient.stackV1Endpoint();
            // A single status call is enough; the reason is read from the returned status map.
            return stackV1Endpoint.status(Long.valueOf(stackId)).get("statusReason").toString();
        }
    }
    return "";
}
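A caller typically invokes getFailedStatusReason after an operation that is expected to fail, then asserts on the reported reason. A brief, hedged usage sketch is below; the status values, the WaitResult.FAILED constant and the substring being checked are illustrative assumptions.

// Hypothetical usage: wait until the cluster reaches a failed state, then assert on the reported reason.
Map<String, String> desiredStatuses = new HashMap<>();
desiredStatuses.put("status", "AVAILABLE");
desiredStatuses.put("clusterStatus", "CREATE_FAILED");
String statusReason = CloudbreakUtil.getFailedStatusReason(getCloudbreakClient(), stackId,
        desiredStatuses, Collections.singletonList(WaitResult.FAILED));
Assert.assertTrue(statusReason.contains("Ambari"), "Unexpected failure reason: " + statusReason);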