use of com.sequenceiq.cloudbreak.domain.InstanceGroup in project cloudbreak by hortonworks.
The class NetworkUtils, method getPorts.
public static List<Port> getPorts(Optional<Stack> stack) {
    List<Port> result = new ArrayList<>();
    if (stack.isPresent()) {
        Stack stackInstance = stack.get();
        List<EndpointRule> aclRules = createACLRules(stackInstance);
        for (InstanceGroup instanceGroup : stackInstance.getInstanceGroups()) {
            for (SecurityRule rule : instanceGroup.getSecurityGroup().getSecurityRules()) {
                for (String portNumber : rule.getPorts()) {
                    Port port = getPortByPortNumberAndProtocol(portNumber, rule.getProtocol());
                    if (port != null) {
                        result.add(new Port(port.getExposedService(), portNumber, portNumber, rule.getProtocol(), aclRules));
                    }
                }
            }
        }
    } else {
        result.addAll(PORTS);
    }
    return result;
}
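A minimal usage sketch, not taken from the source, assuming a Stack instance named stack is in scope: with an empty Optional, getPorts falls back to the default PORTS constant; with a concrete Stack, only ports declared on the instance groups' security rules are returned.
// Hypothetical call sites, for illustration only.
List<Port> defaultPorts = NetworkUtils.getPorts(Optional.empty());   // default PORTS list
List<Port> stackPorts = NetworkUtils.getPorts(Optional.of(stack));   // ports from the stack's security rules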
use of com.sequenceiq.cloudbreak.domain.InstanceGroup in project cloudbreak by hortonworks.
The class ClusterRepairFlowEventChainFactory, method createFlowTriggerEventQueue.
@Override
public Queue<Selectable> createFlowTriggerEventQueue(ClusterRepairTriggerEvent event) {
    StackView stackView = stackService.getByIdView(event.getStackId());
    Queue<Selectable> flowChainTriggers = new ConcurrentLinkedDeque<>();
    Map<String, List<String>> failedNodesMap = event.getFailedNodesMap();
    for (Entry<String, List<String>> failedNodes : failedNodesMap.entrySet()) {
        String hostGroupName = failedNodes.getKey();
        List<String> hostNames = failedNodes.getValue();
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stackView.getClusterView().getId(), hostGroupName);
        InstanceGroup instanceGroup = hostGroup.getConstraint().getInstanceGroup();
        if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType())) {
            List<InstanceMetaData> primary = instanceMetadataRepository.findAllByInstanceGroup(instanceGroup).stream()
                    .filter(imd -> hostNames.contains(imd.getDiscoveryFQDN()) && imd.getInstanceMetadataType() == InstanceMetadataType.GATEWAY_PRIMARY)
                    .collect(Collectors.toList());
            if (!primary.isEmpty()) {
                flowChainTriggers.add(new ChangePrimaryGatewayTriggerEvent(ChangePrimaryGatewayEvent.CHANGE_PRIMARY_GATEWAY_TRIGGER_EVENT.event(),
                        event.getStackId(), event.accepted()));
            }
        }
        flowChainTriggers.add(new ClusterAndStackDownscaleTriggerEvent(FlowChainTriggers.FULL_DOWNSCALE_TRIGGER_EVENT, event.getStackId(), hostGroupName,
                new HashSet<>(hostNames), ScalingType.DOWNSCALE_TOGETHER, event.accepted()));
        if (!event.isRemoveOnly()) {
            flowChainTriggers.add(new StackAndClusterUpscaleTriggerEvent(FlowChainTriggers.FULL_UPSCALE_TRIGGER_EVENT, event.getStackId(), hostGroupName,
                    hostNames.size(), ScalingType.UPSCALE_TOGETHER, Sets.newHashSet(hostNames)));
            // we need to update all ephemeral clusters that are connected to a datalake
            if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType())
                    && !stackService.findClustersConnectedToDatalake(event.getStackId()).isEmpty()) {
                flowChainTriggers.add(new EphemeralClustersUpgradeTriggerEvent(FlowChainTriggers.EPHEMERAL_CLUSTERS_UPDATE_TRIGGER_EVENT,
                        event.getStackId(), event.accepted()));
            }
        }
    }
    return flowChainTriggers;
}
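A hedged consumption sketch; factory and repairEvent below are assumed to be already constructed (their builders are not shown in the source). The returned queue is ordered, so a primary gateway change for a gateway group is processed before the downscale and upscale triggers for the same host group.
// 'factory' and 'repairEvent' are hypothetical names; the flow engine hand-off is only indicated.
Queue<Selectable> chain = factory.createFlowTriggerEventQueue(repairEvent);
while (!chain.isEmpty()) {
    Selectable trigger = chain.poll();
    // submit 'trigger' to the flow engine here, preserving FIFO order
}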
use of com.sequenceiq.cloudbreak.domain.InstanceGroup in project cloudbreak by hortonworks.
The class ServiceTestUtils, method createStack.
public static Stack createStack(String owner, String account, Template template, Credential credential, Cluster cluster, Set<Resource> resources) {
    Template template1 = createTemplate(AWS);
    Template template2 = createTemplate(AWS);
    Set<InstanceGroup> instanceGroups = new HashSet<>();
    InstanceGroup instanceGroup1 = new InstanceGroup();
    instanceGroup1.setNodeCount(2);
    instanceGroup1.setGroupName("master");
    instanceGroup1.setTemplate(template1);
    instanceGroups.add(instanceGroup1);
    InstanceGroup instanceGroup2 = new InstanceGroup();
    instanceGroup2.setNodeCount(2);
    instanceGroup2.setGroupName("slave_1");
    instanceGroup2.setTemplate(template2);
    instanceGroups.add(instanceGroup2);
    Stack stack = new Stack();
    stack.setCredential(credential);
    stack.setRegion("EU_WEST_1");
    stack.setOwner(owner);
    stack.setAccount(account);
    stack.setStackStatus(new StackStatus(stack, Status.REQUESTED, "", DetailedStackStatus.PROVISION_REQUESTED));
    stack.setInstanceGroups(instanceGroups);
    stack.setCluster(cluster);
    stack.setPublicInAccount(true);
    stack.setResources(resources);
    return stack;
}
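A minimal fixture-assembly sketch, assuming mocked or prebuilt Credential and Cluster instances and using only helpers visible above (note that createStack ignores the passed-in Template and builds its own AWS templates):
// Hypothetical test setup, for illustration only.
Stack stack = ServiceTestUtils.createStack("owner-1", "account-1",
        ServiceTestUtils.createTemplate(AWS), credential, cluster, Collections.emptySet());
Assert.assertEquals(2, stack.getInstanceGroups().size());   // "master" and "slave_1"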
use of com.sequenceiq.cloudbreak.domain.InstanceGroup in project cloudbreak by hortonworks.
The class AccountPreferencesValidatorTest, method testValidateShouldThrowExceptionWhenTheStackContainsNotAllowedInstanceTypes.
@Test
public void testValidateShouldThrowExceptionWhenTheStackContainsNotAllowedInstanceTypes() throws AccountPreferencesValidationException {
    String n1St4Type = "n1-standard-4";
    List<String> allowedInstanceTypes = Arrays.asList(n1St4Type, "n1-standard-8", "n1-standard-16");
    when(preferences.getAllowedInstanceTypes()).thenReturn(allowedInstanceTypes);
    InstanceGroup cbgateway = Mockito.mock(InstanceGroup.class, Mockito.RETURNS_DEEP_STUBS);
    InstanceGroup master = Mockito.mock(InstanceGroup.class, Mockito.RETURNS_DEEP_STUBS);
    InstanceGroup slave = Mockito.mock(InstanceGroup.class, Mockito.RETURNS_DEEP_STUBS);
    when(cbgateway.getTemplate().getInstanceType()).thenReturn(n1St4Type);
    when(master.getTemplate().getInstanceType()).thenReturn(n1St4Type);
    when(slave.getTemplate().getInstanceType()).thenReturn("n1-standard-32");
    when(stack.getInstanceGroups()).thenReturn(Sets.newHashSet(cbgateway, master, slave));
    thrown.expect(AccountPreferencesValidationException.class);
    thrown.expectMessage("The 'n1-standard-32' instance type isn't allowed within the account!");
    underTest.validate(stack, EMPTY_STRING, EMPTY_STRING);
}
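A hedged companion sketch, not part of the source test class: the same mocking pattern with only allowed instance types should let validate return without throwing.
// Hypothetical happy-path test, reusing the mocks and stubbing style shown above.
@Test
public void testValidatePassesWhenAllInstanceTypesAreAllowed() throws AccountPreferencesValidationException {
    when(preferences.getAllowedInstanceTypes()).thenReturn(Arrays.asList("n1-standard-4", "n1-standard-8"));
    InstanceGroup master = Mockito.mock(InstanceGroup.class, Mockito.RETURNS_DEEP_STUBS);
    when(master.getTemplate().getInstanceType()).thenReturn("n1-standard-4");
    when(stack.getInstanceGroups()).thenReturn(Sets.newHashSet(master));
    // no exception expected; the test fails if validate throws
    underTest.validate(stack, EMPTY_STRING, EMPTY_STRING);
}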
use of com.sequenceiq.cloudbreak.domain.InstanceGroup in project cloudbreak by hortonworks.
The class InstanceGroupMetadataCollectorTest, method testCollectFqdnsWhenMetadataAvailable.
@Test
public void testCollectFqdnsWhenMetadataAvailable() {
    Stack stack = TestUtil.stack();
    for (InstanceGroup instanceGroup : stack.getInstanceGroups()) {
        when(instanceMetadataRepository.findAliveInstancesInInstanceGroup(instanceGroup.getId()))
                .thenReturn(Lists.newArrayList(instanceGroup.getInstanceMetaData().iterator()));
    }
    Map<String, List<InstanceMetaData>> stringListMap = underTest.collectMetadata(stack);
    Assert.assertEquals(3, stringListMap.size());
    Assert.assertTrue(stringListMap.keySet().containsAll(Sets.newHashSet("is1", "is2", "is3")));
    verify(instanceMetadataRepository, times(3)).findAliveInstancesInInstanceGroup(anyLong());
}
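A short follow-up sketch, assuming (as the assertions above imply) that TestUtil.stack() names its groups "is1", "is2", "is3" and attaches metadata to each: the collector keys its result by instance group name, so per-group metadata can be read back directly.
// Hypothetical follow-up assertions.
List<InstanceMetaData> masterMetadata = stringListMap.get("is1");
Assert.assertFalse(masterMetadata.isEmpty());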