Use of com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint in project cloudbreak by hortonworks.
The class TagsTest, method testTagTest.
@Test
@Parameters("tags")
public void testTagTest(String tags) throws Exception {
// GIVEN
IntegrationTestContext itContext = getItContext();
String stackId = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
Map<String, String> cloudProviderParams = itContext.getContextParam(CloudbreakITContextConstants.CLOUDPROVIDER_PARAMETERS, Map.class);
StackV1Endpoint stackV1Endpoint = itContext.getContextParam(CloudbreakITContextConstants.CLOUDBREAK_CLIENT, CloudbreakClient.class).stackV1Endpoint();
StackResponse stackResponse = stackV1Endpoint.get(Long.valueOf(stackId), new HashSet<>());
Map<String, String> userDefinedTagsStack = TagsUtil.checkTagsStack(stackResponse);
Map<String, String> tagsToCheckMap = TagsUtil.getTagsToCheck(tags);
// WHEN: The cluster was created with user-defined tags
// THEN
TagsUtil.checkTags(tagsToCheckMap, userDefinedTagsStack);
List<String> instanceIdList = TagsUtil.getInstancesList(stackResponse);
TagsUtil.checkTagsWithProvider(stackResponse.getName(), cloudProviderParams, applicationContext, instanceIdList, tagsToCheckMap);
}
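TagsUtil is a helper class from the integration-test module whose body is not shown above. As a hedged sketch of the kind of work it does, the following assumes the tags parameter uses a key:value,key:value format and that checkTags only verifies that every expected pair is present on the stack; the real helper may parse and assert differently.
import java.util.HashMap;
import java.util.Map;
import org.testng.Assert;

public class TagsCheckSketch {
    // Parse "key1:value1,key2:value2" into a map (assumed format of the "tags" test parameter).
    static Map<String, String> getTagsToCheck(String tags) {
        Map<String, String> tagsToCheck = new HashMap<>();
        for (String pair : tags.split(",")) {
            String[] keyValue = pair.split(":", 2);
            tagsToCheck.put(keyValue[0].trim(), keyValue[1].trim());
        }
        return tagsToCheck;
    }

    // Assert that every expected tag is present on the stack with the expected value.
    static void checkTags(Map<String, String> expected, Map<String, String> stackTags) {
        for (Map.Entry<String, String> entry : expected.entrySet()) {
            Assert.assertTrue(stackTags.containsKey(entry.getKey()), "Missing tag: " + entry.getKey());
            Assert.assertEquals(stackTags.get(entry.getKey()), entry.getValue(), "Unexpected value for tag: " + entry.getKey());
        }
    }
}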
Use of StackV1Endpoint in the class MockClusterCreationWithSaltFailTest, method testClusterCreation.
@Test
@Parameters({ "clusterName", "ambariPort", "ambariUser", "ambariPassword", "emailNeeded", "enableSecurity", "kerberosMasterKey", "kerberosAdmin", "kerberosPassword", "runRecipesOnHosts", "checkAmbari", "mockPort" })
public void testClusterCreation(@Optional("it-cluster") String clusterName, @Optional("8080") String ambariPort, @Optional("admin") String ambariUser, @Optional("admin123!@#") String ambariPassword, @Optional("false") boolean emailNeeded, @Optional("false") boolean enableSecurity, @Optional String kerberosMasterKey, @Optional String kerberosAdmin, @Optional String kerberosPassword, @Optional("") String runRecipesOnHosts, @Optional("true") boolean checkAmbari, @Optional("9443") int mockPort) throws Exception {
// GIVEN
IntegrationTestContext itContext = getItContext();
String stackIdStr = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
Integer stackId = Integer.valueOf(stackIdStr);
Integer blueprintId = Integer.valueOf(itContext.getContextParam(CloudbreakITContextConstants.BLUEPRINT_ID));
List<HostGroup> hostgroups = itContext.getContextParam(CloudbreakITContextConstants.HOSTGROUP_ID, List.class);
Set<HostGroupRequest> hostGroupJsons1 = convertHostGroups(hostgroups, runRecipesOnHosts);
itContext.putContextParam(CloudbreakITContextConstants.AMBARI_USER_ID, ambariUser);
itContext.putContextParam(CloudbreakITContextConstants.AMBARI_PASSWORD_ID, ambariPassword);
// WHEN
ClusterRequest clusterRequest = new ClusterRequest();
clusterRequest.setName(clusterName);
clusterRequest.setDescription("Cluster for integration test");
clusterRequest.setEnableSecurity(enableSecurity);
clusterRequest.setPassword(ambariPassword);
clusterRequest.setUserName(ambariUser);
clusterRequest.setBlueprintId(Long.valueOf(blueprintId));
clusterRequest.setHostGroups(hostGroupJsons1);
KerberosRequest kerberosRequest = new KerberosRequest();
kerberosRequest.setAdmin(kerberosAdmin);
kerberosRequest.setPassword(kerberosPassword);
kerberosRequest.setMasterKey(kerberosMasterKey);
clusterRequest.setKerberos(kerberosRequest);
initSpark();
Map<String, CloudVmMetaDataStatus> instanceMap = itContext.getContextParam(CloudbreakITContextConstants.MOCK_INSTANCE_MAP, Map.class);
addSaltMappings(instanceMap);
addAmbariMappings(instanceMap);
ClusterV1Endpoint clusterV1Endpoint = getCloudbreakClient().clusterEndpoint();
Long clusterId = clusterV1Endpoint.post(Long.valueOf(stackId), clusterRequest).getId();
// THEN
Assert.assertNotNull(clusterId);
CloudbreakUtil.waitAndCheckStackStatus(getCloudbreakClient(), stackIdStr, "AVAILABLE");
String failMessage = "Source file salt://ambari/scripts/ambari-server-initttt.sh not found | " + "Service ambari-server is already enabled, and is dead | " + "Package haveged is already installed.";
CloudbreakUtil.checkClusterFailed(getCloudbreakClient().stackV1Endpoint(), stackIdStr, failMessage);
}
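CloudbreakUtil.checkClusterFailed is another project helper that is not shown here. A minimal sketch of the style of check implied by the pipe-separated failMessage, assuming the failure reason string has already been read from the stack's cluster status (how that string is obtained is project-specific):
import org.testng.Assert;

public class ClusterFailureCheckSketch {
    // failMessage is built above as fragments joined with " | "; a check in this style asserts
    // that every fragment appears in the failure reason reported for the cluster.
    static void assertFailureReasonContains(String clusterStatusReason, String failMessage) {
        for (String fragment : failMessage.split("\\|")) {
            Assert.assertTrue(clusterStatusReason.contains(fragment.trim()), "Missing failure fragment: " + fragment.trim());
        }
    }
}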
Use of StackV1Endpoint in the class MockClusterCreationWithSaltSuccessTest, method testClusterCreation.
@Test
@Parameters({ "clusterName", "ambariPort", "ambariUser", "ambariPassword", "emailNeeded", "enableSecurity", "kerberosMasterKey", "kerberosAdmin", "kerberosPassword", "runRecipesOnHosts", "checkAmbari", "mockPort" })
public void testClusterCreation(@Optional("it-cluster") String clusterName, @Optional("8080") String ambariPort, @Optional("admin") String ambariUser, @Optional("admin123!@#") String ambariPassword, @Optional("false") boolean emailNeeded, @Optional("false") boolean enableSecurity, @Optional String kerberosMasterKey, @Optional String kerberosAdmin, @Optional String kerberosPassword, @Optional("") String runRecipesOnHosts, @Optional("true") boolean checkAmbari, @Optional("9443") int mockPort) throws Exception {
// GIVEN
IntegrationTestContext itContext = getItContext();
String stackIdStr = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
Integer stackId = Integer.valueOf(stackIdStr);
Integer blueprintId = Integer.valueOf(itContext.getContextParam(CloudbreakITContextConstants.BLUEPRINT_ID));
List<HostGroup> hostgroups = itContext.getContextParam(CloudbreakITContextConstants.HOSTGROUP_ID, List.class);
Set<HostGroupRequest> hostGroupJsons1 = convertHostGroups(hostgroups, runRecipesOnHosts);
itContext.putContextParam(CloudbreakITContextConstants.AMBARI_USER_ID, ambariUser);
itContext.putContextParam(CloudbreakITContextConstants.AMBARI_PASSWORD_ID, ambariPassword);
// WHEN
ClusterRequest clusterRequest = new ClusterRequest();
clusterRequest.setName(clusterName);
clusterRequest.setDescription("Cluster for integration test");
clusterRequest.setEmailNeeded(emailNeeded);
clusterRequest.setEnableSecurity(enableSecurity);
clusterRequest.setPassword(ambariPassword);
clusterRequest.setUserName(ambariUser);
clusterRequest.setBlueprintId(Long.valueOf(blueprintId));
clusterRequest.setHostGroups(hostGroupJsons1);
if (enableSecurity) {
    KerberosRequest kerberosRequest = new KerberosRequest();
    kerberosRequest.setAdmin(kerberosAdmin);
    kerberosRequest.setPassword(kerberosPassword);
    kerberosRequest.setMasterKey(kerberosMasterKey);
    clusterRequest.setKerberos(kerberosRequest);
}
GatewayJson gatewayJson = new GatewayJson();
gatewayJson.setEnableGateway(Boolean.TRUE);
gatewayJson.setExposedServices(ImmutableList.of("ALL"));
clusterRequest.setGateway(gatewayJson);
ClusterV1Endpoint clusterV1Endpoint = getCloudbreakClient().clusterEndpoint();
Long clusterId = clusterV1Endpoint.post(Long.valueOf(stackId), clusterRequest).getId();
// THEN
Assert.assertNotNull(clusterId);
CloudbreakUtil.waitAndCheckStackStatus(getCloudbreakClient(), stackIdStr, "AVAILABLE");
CloudbreakUtil.checkClusterAvailability(getCloudbreakClient().stackV1Endpoint(), ambariPort, stackIdStr, ambariUser, ambariPassword, checkAmbari);
StackCreationMock stackCreationMock = getItContext().getContextParam(CloudbreakV2Constants.MOCK_SERVER, StackCreationMock.class);
stackCreationMock.verifyCalls(clusterName);
}
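CloudbreakUtil.waitAndCheckStackStatus, used by both mock tests, is also part of the integration-test module. A simplified sketch of the polling pattern it stands for, assuming StackResponse.getStatus() returns an enum whose name() matches values such as "AVAILABLE"; the real helper has its own retry, timeout, and failure handling:
import java.util.HashSet;
import com.sequenceiq.cloudbreak.api.endpoint.v1.StackV1Endpoint;

public class StackStatusPollSketch {
    static void waitForStackStatus(StackV1Endpoint stackV1Endpoint, Long stackId, String desiredStatus) throws InterruptedException {
        int maxAttempts = 180; // assumed limit; the real helper uses its own timeout settings
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            String currentStatus = stackV1Endpoint.get(stackId, new HashSet<>()).getStatus().name();
            if (desiredStatus.equals(currentStatus)) {
                return;
            }
            Thread.sleep(10000L); // poll every 10 seconds
        }
        throw new IllegalStateException("Timeout while waiting for stack status " + desiredStatus);
    }
}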
Use of StackV1Endpoint in the class CountRecipeResultsTest, method testFetchRecipeResult.
@Test
@Parameters({ "searchRecipesOnHosts", "lookingFor", "require" })
public void testFetchRecipeResult(String searchRecipesOnHosts, String lookingFor, Integer require) throws Exception {
// GIVEN
Assert.assertEquals(new File(defaultPrivateKeyFile).exists(), true, "Private cert file not found: " + defaultPrivateKeyFile);
Assert.assertFalse(lookingFor.isEmpty());
IntegrationTestContext itContext = getItContext();
String stackId = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
StackV1Endpoint stackV1Endpoint = itContext.getContextParam(CloudbreakITContextConstants.CLOUDBREAK_CLIENT, CloudbreakClient.class).stackV1Endpoint();
List<InstanceGroupResponse> instanceGroups = stackV1Endpoint.get(Long.valueOf(stackId), new HashSet<>()).getInstanceGroups();
String[] files = lookingFor.split(",");
List<String> publicIps = getPublicIps(instanceGroups, Arrays.asList(searchRecipesOnHosts.split(",")));
Collection<Future<Boolean>> futures = new ArrayList<>(publicIps.size() * files.length);
ExecutorService executorService = Executors.newFixedThreadPool(publicIps.size());
AtomicInteger count = new AtomicInteger(0);
// WHEN
try {
    for (String file : files) {
        for (String ip : publicIps) {
            futures.add((Future<Boolean>) executorService.submit(() -> {
                if (findFile(ip, file)) {
                    count.incrementAndGet();
                }
            }));
        }
    }
    for (Future<Boolean> future : futures) {
        future.get();
    }
} finally {
    executorService.shutdown();
}
// THEN
Assert.assertEquals(count.get(), require.intValue(), "The number of existing files is different than required.");
}
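getPublicIps and findFile are private helpers of CountRecipeResultsTest and are not shown above. The following is only a sketch of what a getPublicIps-style lookup could look like; it assumes the v1 API model exposes InstanceGroupResponse.getGroup(), InstanceGroupResponse.getMetadata(), and InstanceMetaDataJson.getPublicIp().
import java.util.ArrayList;
import java.util.List;
import com.sequenceiq.cloudbreak.api.model.InstanceGroupResponse; // package assumed
import com.sequenceiq.cloudbreak.api.model.InstanceMetaDataJson; // package assumed

public class PublicIpSketch {
    static List<String> getPublicIps(List<InstanceGroupResponse> instanceGroups, List<String> hostGroupNames) {
        List<String> publicIps = new ArrayList<>();
        for (InstanceGroupResponse instanceGroup : instanceGroups) {
            // Collect addresses only for the host groups named in searchRecipesOnHosts.
            if (hostGroupNames.contains(instanceGroup.getGroup())) {
                for (InstanceMetaDataJson metaData : instanceGroup.getMetadata()) {
                    publicIps.add(metaData.getPublicIp());
                }
            }
        }
        return publicIps;
    }
}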
Use of StackV1Endpoint in the class AutoRecoveryTest, method testAutoRecovery.
@Test
@Parameters({ "hostGroup", "removedInstanceCount" })
public void testAutoRecovery(String hostGroup, @Optional("0") Integer removedInstanceCount) {
// GIVEN
IntegrationTestContext itContext = getItContext();
String stackId = itContext.getContextParam(CloudbreakITContextConstants.STACK_ID);
String ambariUser = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_USER_ID);
String ambariPassword = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PASSWORD_ID);
String ambariPort = itContext.getContextParam(CloudbreakITContextConstants.AMBARI_PORT_ID);
Map<String, String> cloudProviderParams = itContext.getContextParam(CloudbreakITContextConstants.CLOUDPROVIDER_PARAMETERS, Map.class);
StackV1Endpoint stackV1Endpoint = getCloudbreakClient().stackV1Endpoint();
StackResponse stackResponse = stackV1Endpoint.get(Long.valueOf(stackId), new HashSet<>());
String instanceToDelete = RecoveryUtil.getInstanceId(stackResponse, hostGroup);
Assert.assertNotNull(instanceToDelete);
RecoveryUtil.deleteInstance(cloudProviderParams, instanceToDelete);
Integer expectedNodeCountAmbari = ScalingUtil.getNodeCountAmbari(stackV1Endpoint, ambariPort, stackId, ambariUser, ambariPassword, itContext) - removedInstanceCount;
WaitResult waitResult = CloudbreakUtil.waitForEvent(getCloudbreakClient(), stackResponse.getName(), "RECOVERY", "autorecovery requested", RecoveryUtil.getCurentTimeStamp());
if (waitResult == WaitResult.TIMEOUT) {
    Assert.fail("Timeout happened when waiting for the desired host state");
}
// WHEN: Cloudbreak automatically starts the recovery
// THEN
Map<String, String> desiredStatuses = new HashMap<>();
desiredStatuses.put("status", "AVAILABLE");
desiredStatuses.put("clusterStatus", "AVAILABLE");
CloudbreakUtil.waitAndCheckStatuses(getCloudbreakClient(), stackId, desiredStatuses);
Integer actualNodeCountAmbari = ScalingUtil.getNodeCountAmbari(stackV1Endpoint, ambariPort, stackId, ambariUser, ambariPassword, itContext);
Assert.assertEquals(expectedNodeCountAmbari, actualNodeCountAmbari);
}
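RecoveryUtil.getInstanceId is a test helper as well. As a sketch, and under the same accessor assumptions as the getPublicIps sketch above, it could pick one instance id from the requested host group like this:
import com.sequenceiq.cloudbreak.api.model.InstanceMetaDataJson; // package assumed
import com.sequenceiq.cloudbreak.api.model.StackResponse; // package assumed

public class InstanceLookupSketch {
    static String getInstanceId(StackResponse stackResponse, String hostGroup) {
        return stackResponse.getInstanceGroups().stream()
                .filter(instanceGroup -> hostGroup.equals(instanceGroup.getGroup()))
                .flatMap(instanceGroup -> instanceGroup.getMetadata().stream())
                .map(InstanceMetaDataJson::getInstanceId)
                .findFirst()
                .orElse(null); // null means no instance in this host group; the test asserts non-null
    }
}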