use of com.sequenceiq.cloudbreak.domain.InstanceMetaData in project cloudbreak by hortonworks.
the class AbstractInstanceTerminationAction method createFlowContext.
@Override
protected InstanceTerminationContext createFlowContext(String flowId, StateContext<InstanceTerminationState, InstanceTerminationEvent> stateContext, P payload) {
    Stack stack = stackService.getByIdWithLists(payload.getStackId());
    MDCBuilder.buildMdcContext(stack);
    Location location = location(region(stack.getRegion()), availabilityZone(stack.getAvailabilityZone()));
    CloudContext cloudContext = new CloudContext(stack.getId(), stack.getName(), stack.cloudPlatform(), stack.getOwner(), stack.getPlatformVariant(), location);
    CloudCredential cloudCredential = credentialConverter.convert(stack.getCredential());
    Set<String> instanceIds = payload.getInstanceIds();
    CloudStack cloudStack = cloudStackConverter.convert(stack, instanceIds);
    List<CloudResource> cloudResources = cloudResourceConverter.convert(stack.getResources());
    List<InstanceMetaData> instanceMetaDataList = new ArrayList<>();
    List<CloudInstance> cloudInstances = new ArrayList<>();
    for (String instanceId : instanceIds) {
        InstanceMetaData instanceMetaData = instanceMetaDataRepository.findByInstanceId(stack.getId(), instanceId);
        CloudInstance cloudInstance = metadataConverter.convert(instanceMetaData);
        instanceMetaDataList.add(instanceMetaData);
        cloudInstances.add(cloudInstance);
    }
    return new InstanceTerminationContext(flowId, stack, cloudContext, cloudCredential, cloudStack, cloudResources, cloudInstances, instanceMetaDataList);
}
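As a hedged aside, the per-instance lookup in the loop above could be pulled into a small helper. This is only a sketch: the helper name is an assumption, while instanceMetaDataRepository.findByInstanceId and metadataConverter.convert are the calls shown above.
// Hypothetical helper; name and placement are assumptions, the repository and
// converter calls are the ones used in createFlowContext above.
private List<CloudInstance> convertToCloudInstances(Long stackId, Set<String> instanceIds, List<InstanceMetaData> instanceMetaDataList) {
    List<CloudInstance> cloudInstances = new ArrayList<>();
    for (String instanceId : instanceIds) {
        // Look up the persisted metadata for this instance id within the stack.
        InstanceMetaData instanceMetaData = instanceMetaDataRepository.findByInstanceId(stackId, instanceId);
        instanceMetaDataList.add(instanceMetaData);
        // Convert the entity into its cloud-layer representation.
        cloudInstances.add(metadataConverter.convert(instanceMetaData));
    }
    return cloudInstances;
}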
use of com.sequenceiq.cloudbreak.domain.InstanceMetaData in project cloudbreak by hortonworks.
the class AmbariClusterHostServiceTypeTest method testUpdateHostsForDownscaleSelectMultipleNodesWithLessData.
@Test
public void testUpdateHostsForDownscaleSelectMultipleNodesWithLessData() throws Exception {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-2);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    HostMetadata metadata4 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData4 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3, metadata4);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(85_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(90_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(80_000L, 20_000L));
    dfsSpace.put("node4", singletonMap(90_000L, 10_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(metadata4.getHostName()).thenReturn("node4");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(instanceMetaData4.getAmbariServer()).thenReturn(false);
    when(ambariClientProvider.getAmbariClient(any(HttpClientConfig.class), anyInt(), any(Cluster.class))).thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1")).thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getAmbariName())).thenReturn(singletonMap("slave_1", singletonList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1")).thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2")).thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3")).thenReturn(instanceMetaData3);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node4")).thenReturn(instanceMetaData4);
    when(hostGroupService.getByClusterIdAndName(anyLong(), anyString())).thenReturn(hostGroup);
    when(statusToPollGroupConverter.convert(any(Status.class))).thenReturn(PollGroup.POLLABLE);
    underTest.updateHosts(stack.getId(), json);
    verify(flowManager, times(1)).triggerClusterDownscale(stack.getId(), json);
    verify(blueprintValidator, times(1)).validateHostGroupScalingRequest(stack.getCluster().getBlueprint(), hostGroup, json.getScalingAdjustment());
}
use of com.sequenceiq.cloudbreak.domain.InstanceMetaData in project cloudbreak by hortonworks.
the class AmbariClusterHostServiceTypeTest method testUpdateHostsForDownscaleSelectNodesWithLessData.
@Test
public void testUpdateHostsForDownscaleSelectNodesWithLessData() throws CloudbreakSecuritySetupException {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(85_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(90_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(80_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(ambariClientProvider.getAmbariClient(any(HttpClientConfig.class), anyInt(), any(Cluster.class))).thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1")).thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getAmbariName())).thenReturn(singletonMap("slave_1", singletonList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1")).thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2")).thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3")).thenReturn(instanceMetaData3);
    when(hostGroupService.getByClusterIdAndName(anyLong(), anyString())).thenReturn(hostGroup);
    when(statusToPollGroupConverter.convert(any(Status.class))).thenReturn(PollGroup.POLLABLE);
    underTest.updateHosts(stack.getId(), json);
    verify(flowManager, times(1)).triggerClusterDownscale(stack.getId(), json);
    verify(blueprintValidator, times(1)).validateHostGroupScalingRequest(stack.getCluster().getBlueprint(), hostGroup, json.getScalingAdjustment());
}
use of com.sequenceiq.cloudbreak.domain.InstanceMetaData in project cloudbreak by hortonworks.
the class AmbariClusterHostServiceTypeTest method testUpdateHostsForDownscaleWhenRemainingSpaceIsNotEnough.
@Test
public void testUpdateHostsForDownscaleWhenRemainingSpaceIsNotEnough() throws Exception {
    HostGroupAdjustmentJson json = new HostGroupAdjustmentJson();
    json.setHostGroup("slave_1");
    json.setScalingAdjustment(-1);
    AmbariClient ambariClient = mock(AmbariClient.class);
    HostMetadata metadata1 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData1 = mock(InstanceMetaData.class);
    HostMetadata metadata2 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData2 = mock(InstanceMetaData.class);
    HostMetadata metadata3 = mock(HostMetadata.class);
    InstanceMetaData instanceMetaData3 = mock(InstanceMetaData.class);
    Set<HostMetadata> hostsMetaData = new HashSet<>();
    List<HostMetadata> hostsMetadataList = asList(metadata1, metadata2, metadata3);
    hostsMetaData.addAll(hostsMetadataList);
    HostGroup hostGroup = new HostGroup();
    hostGroup.setHostMetadata(hostsMetaData);
    hostGroup.setName("slave_1");
    Map<String, Map<Long, Long>> dfsSpace = new HashMap<>();
    dfsSpace.put("node2", singletonMap(5_000L, 15_000L));
    dfsSpace.put("node1", singletonMap(10_000L, 10_000L));
    dfsSpace.put("node3", singletonMap(6_000L, 20_000L));
    when(metadata1.getHostName()).thenReturn("node1");
    when(metadata2.getHostName()).thenReturn("node2");
    when(metadata3.getHostName()).thenReturn("node3");
    when(instanceMetaData1.getAmbariServer()).thenReturn(false);
    when(instanceMetaData2.getAmbariServer()).thenReturn(false);
    when(instanceMetaData3.getAmbariServer()).thenReturn(false);
    when(ambariClientProvider.getAmbariClient(any(HttpClientConfig.class), anyInt(), any(Cluster.class))).thenReturn(ambariClient);
    when(ambariClient.getComponentsCategory("multi-node-yarn", "slave_1")).thenReturn(singletonMap("DATANODE", "SLAVE"));
    when(ambariClient.getBlueprintMap(cluster.getBlueprint().getAmbariName())).thenReturn(singletonMap("slave_1", singletonList("DATANODE")));
    when(ambariClient.getDFSSpace()).thenReturn(dfsSpace);
    when(hostGroupService.getByClusterIdAndName(anyLong(), anyString())).thenReturn(hostGroup);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node1")).thenReturn(instanceMetaData1);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node2")).thenReturn(instanceMetaData2);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), "node3")).thenReturn(instanceMetaData3);
    when(statusToPollGroupConverter.convert(any(Status.class))).thenReturn(PollGroup.POLLABLE);
    underTest.updateHosts(stack.getId(), json);
    verify(flowManager, times(1)).triggerClusterDownscale(stack.getId(), json);
    verify(blueprintValidator, times(1)).validateHostGroupScalingRequest(stack.getCluster().getBlueprint(), hostGroup, json.getScalingAdjustment());
}
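The three downscale tests above repeat the same per-host stubbing. A hedged consolidation sketch follows; the helper name is an assumption, while the mock() and when()/thenReturn() stubs mirror the ones used in the tests (stack and instanceMetadataRepository are the existing test fields).
// Hypothetical test helper; name and signature are assumptions, the stubbing
// mirrors the per-host setup repeated in the downscale tests above.
private HostMetadata mockDataNodeHost(String hostName, InstanceMetaData instanceMetaData) {
    HostMetadata metadata = mock(HostMetadata.class);
    when(metadata.getHostName()).thenReturn(hostName);
    when(instanceMetaData.getAmbariServer()).thenReturn(false);
    when(instanceMetadataRepository.findHostInStack(stack.getId(), hostName)).thenReturn(instanceMetaData);
    return metadata;
}
With such a helper, each test would need only one line per node, for example HostMetadata metadata1 = mockDataNodeHost("node1", instanceMetaData1).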
use of com.sequenceiq.cloudbreak.domain.InstanceMetaData in project cloudbreak by hortonworks.
the class AmbariClusterCreationSuccessHandlerTest method testHandleClusterCreationSuccessWhenEverythingGoesFine.
@Test
public void testHandleClusterCreationSuccessWhenEverythingGoesFine() {
    Stack stack = TestUtil.stack();
    Cluster cluster = TestUtil.cluster();
    Set<HostMetadata> hostMetadataList = new HashSet<>();
    cluster.getHostGroups().forEach(hostGroup -> hostGroup.getHostMetadata().forEach(hostMetadataList::add));
    List<InstanceMetaData> instanceMetaDataList = new ArrayList<>();
    stack.getInstanceGroups().forEach(instanceGroup -> instanceGroup.getInstanceMetaData().forEach(instanceMetaDataList::add));
    when(clusterService.updateCluster(cluster)).thenReturn(cluster);
    when(instanceMetadataRepository.save(anyCollection())).thenReturn(instanceMetaDataList);
    when(hostMetadataRepository.findHostsInCluster(cluster.getId())).thenReturn(hostMetadataList);
    when(hostMetadataRepository.save(anyCollection())).thenReturn(hostMetadataList);
    underTest.handleClusterCreationSuccess(stack, cluster);
    ArgumentCaptor<Cluster> clusterCaptor = ArgumentCaptor.forClass(Cluster.class);
    verify(clusterService, times(1)).updateCluster(clusterCaptor.capture());
    assertNotNull(clusterCaptor.getValue().getCreationFinished());
    assertNotNull(clusterCaptor.getValue().getUpSince());
    ArgumentCaptor<List> instanceMetadataCaptor = ArgumentCaptor.forClass(List.class);
    verify(instanceMetadataRepository, times(1)).save(instanceMetadataCaptor.capture());
    for (InstanceMetaData instanceMetaData : (List<InstanceMetaData>) instanceMetadataCaptor.getValue()) {
        Assert.assertEquals(InstanceStatus.REGISTERED, instanceMetaData.getInstanceStatus());
    }
    ArgumentCaptor<List> hostMetadataCaptor = ArgumentCaptor.forClass(List.class);
    verify(hostMetadataRepository, times(1)).save(hostMetadataCaptor.capture());
    for (HostMetadata hostMetadata : (List<HostMetadata>) hostMetadataCaptor.getValue()) {
        Assert.assertEquals(HostMetadataState.HEALTHY, hostMetadata.getHostMetadataState());
    }
    verify(hostMetadataRepository, times(1)).findHostsInCluster(cluster.getId());
}
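As a hedged aside, the two captor loops above could be expressed through one generic assertion helper; the helper name and the java.util.function.Function parameter are assumptions, and the assertion style follows the test.
// Hypothetical helper; Assert.assertEquals is the call already used above,
// the generic signature is an assumption for illustration.
private static <T, V> void assertAllHaveValue(V expected, List<T> captured, Function<T, V> getter) {
    for (T item : captured) {
        Assert.assertEquals(expected, getter.apply(item));
    }
}
For example: assertAllHaveValue(InstanceStatus.REGISTERED, instanceMetadataCaptor.getValue(), InstanceMetaData::getInstanceStatus).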