Use of com.sequenceiq.cloudbreak.domain.view.StackView in project cloudbreak by hortonworks.
Class AbstractClusterResetAction, method createFlowContext.
@Override
protected ClusterViewContext createFlowContext(String flowId, StateContext<ClusterResetState, ClusterResetEvent> stateContext, P payload) {
    StackView stack = stackService.getByIdView(payload.getStackId());
    MDCBuilder.buildMdcContext(stack.getId().toString(), stack.getName(), stack.getOwner(), "CLUSTER");
    return new ClusterViewContext(flowId, stack);
}
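A minimal sketch of how this method could be exercised in a Mockito-based unit test. The fixture names (underTest, stackService, payload, stateContext) are assumptions inferred from the snippet above, not taken from the real test class:

// Hedged sketch: exercising createFlowContext with Mockito mocks (test wiring assumed).
StackView stackView = mock(StackView.class);
when(stackView.getId()).thenReturn(1L);
when(stackView.getName()).thenReturn("test-cluster");
when(stackView.getOwner()).thenReturn("owner");
when(stackService.getByIdView(1L)).thenReturn(stackView);
when(payload.getStackId()).thenReturn(1L);

ClusterViewContext context = underTest.createFlowContext("flowId", stateContext, payload);

verify(stackService).getByIdView(1L); // the lightweight view is loaded instead of the full Stack entity
assertNotNull(context);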
Use of com.sequenceiq.cloudbreak.domain.view.StackView in project cloudbreak by hortonworks.
Class ClusterRepairFlowEventChainFactory, method createFlowTriggerEventQueue.
@Override
public Queue<Selectable> createFlowTriggerEventQueue(ClusterRepairTriggerEvent event) {
    StackView stackView = stackService.getByIdView(event.getStackId());
    Queue<Selectable> flowChainTriggers = new ConcurrentLinkedDeque<>();
    Map<String, List<String>> failedNodesMap = event.getFailedNodesMap();
    for (Entry<String, List<String>> failedNodes : failedNodesMap.entrySet()) {
        String hostGroupName = failedNodes.getKey();
        List<String> hostNames = failedNodes.getValue();
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stackView.getClusterView().getId(), hostGroupName);
        InstanceGroup instanceGroup = hostGroup.getConstraint().getInstanceGroup();
        if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType())) {
            List<InstanceMetaData> primary = instanceMetadataRepository.findAllByInstanceGroup(instanceGroup).stream()
                    .filter(imd -> hostNames.contains(imd.getDiscoveryFQDN())
                            && imd.getInstanceMetadataType() == InstanceMetadataType.GATEWAY_PRIMARY)
                    .collect(Collectors.toList());
            if (!primary.isEmpty()) {
                flowChainTriggers.add(new ChangePrimaryGatewayTriggerEvent(
                        ChangePrimaryGatewayEvent.CHANGE_PRIMARY_GATEWAY_TRIGGER_EVENT.event(),
                        event.getStackId(), event.accepted()));
            }
        }
        flowChainTriggers.add(new ClusterAndStackDownscaleTriggerEvent(FlowChainTriggers.FULL_DOWNSCALE_TRIGGER_EVENT,
                event.getStackId(), hostGroupName, new HashSet<>(hostNames), ScalingType.DOWNSCALE_TOGETHER, event.accepted()));
        if (!event.isRemoveOnly()) {
            flowChainTriggers.add(new StackAndClusterUpscaleTriggerEvent(FlowChainTriggers.FULL_UPSCALE_TRIGGER_EVENT,
                    event.getStackId(), hostGroupName, hostNames.size(), ScalingType.UPSCALE_TOGETHER, Sets.newHashSet(hostNames)));
            // we need to update all ephemeral clusters that are connected to a datalake
            if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType())
                    && !stackService.findClustersConnectedToDatalake(event.getStackId()).isEmpty()) {
                flowChainTriggers.add(new EphemeralClustersUpgradeTriggerEvent(FlowChainTriggers.EPHEMERAL_CLUSTERS_UPDATE_TRIGGER_EVENT,
                        event.getStackId(), event.accepted()));
            }
        }
    }
    return flowChainTriggers;
}
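For each failed host group the factory appends, in order: an optional change-primary-gateway trigger (only when the failed hosts include the primary gateway), a downscale trigger, and, unless removeOnly is set, an upscale trigger plus an optional ephemeral-cluster update. A minimal sketch of feeding it input and inspecting that order; the preparation of event and underTest, including the ClusterRepairTriggerEvent constructor (not shown above), is assumed:

// Hedged sketch: shape of the failed-nodes input and inspection of the resulting trigger order.
Map<String, List<String>> failedNodesMap = new HashMap<>();
failedNodesMap.put("worker", Arrays.asList("host-1.example.com", "host-2.example.com"));
// ... build a ClusterRepairTriggerEvent from failedNodesMap (constructor not shown above) ...
Queue<Selectable> triggers = underTest.createFlowTriggerEventQueue(event);
triggers.forEach(trigger -> System.out.println(trigger.getClass().getSimpleName()));
// For a non-gateway host group with removeOnly == false this prints, in order:
// ClusterAndStackDownscaleTriggerEvent, StackAndClusterUpscaleTriggerEvent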
Use of com.sequenceiq.cloudbreak.domain.view.StackView in project cloudbreak by hortonworks.
Class UpscaleFlowEventChainFactory, method createFlowTriggerEventQueue.
@Override
public Queue<Selectable> createFlowTriggerEventQueue(StackAndClusterUpscaleTriggerEvent event) {
    StackView stackView = stackService.getByIdView(event.getStackId());
    ClusterView clusterView = stackView.getClusterView();
    Queue<Selectable> flowEventChain = new ConcurrentLinkedQueue<>();
    flowEventChain.add(new StackSyncTriggerEvent(STACK_SYNC_EVENT.event(), event.getStackId(), false, event.accepted()));
    flowEventChain.add(new StackScaleTriggerEvent(ADD_INSTANCES_EVENT.event(), event.getStackId(), event.getInstanceGroup(),
            event.getAdjustment(), event.getHostNames()));
    if (ScalingType.isClusterUpScale(event.getScalingType()) && clusterView != null) {
        HostGroup hostGroup = hostGroupService.getByClusterIdAndInstanceGroupName(clusterView.getId(), event.getInstanceGroup());
        flowEventChain.add(new ClusterScaleTriggerEvent(CLUSTER_UPSCALE_TRIGGER_EVENT.event(), stackView.getId(),
                hostGroup.getName(), event.getAdjustment()));
    }
    return flowEventChain;
}
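The chain always begins with a stack sync followed by a stack scale trigger; the cluster scale trigger is appended only when the scaling type is a cluster upscale and the stack view actually carries a cluster view. A minimal ordering check, assuming the factory's collaborators are mocked so that clusterView is non-null (JUnit/Mockito wiring not shown above):

// Hedged sketch: verifying the order of the upscale event chain built above.
Queue<Selectable> chain = underTest.createFlowTriggerEventQueue(event);
assertTrue(chain.poll() instanceof StackSyncTriggerEvent);
assertTrue(chain.poll() instanceof StackScaleTriggerEvent);
// Present only when ScalingType.isClusterUpScale(...) holds and clusterView != null:
assertTrue(chain.poll() instanceof ClusterScaleTriggerEvent);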
Use of com.sequenceiq.cloudbreak.domain.view.StackView in project cloudbreak by hortonworks.
Class StackTerminationFailureAction, method createFlowContext.
@Override
protected StackFailureContext createFlowContext(String flowId, StateContext<StackTerminationState, StackTerminationEvent> stateContext,
        StackFailureEvent payload) {
    Flow flow = getFlow(flowId);
    StackView stackView = stackService.getByIdView(payload.getStackId());
    MDCBuilder.buildMdcContext(stackView);
    flow.setFlowFailed(payload.getException());
    return new StackFailureContext(flowId, stackView);
}
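Note that this action passes the whole view to MDCBuilder, while AbstractClusterResetAction at the top of this page passes the individual fields. A minimal sketch showing the two call forms side by side, using only the overloads that already appear on this page (the helper itself is hypothetical):

// Hypothetical helper contrasting the two MDC call forms used on this page.
private void buildMdc(StackView stackView) {
    // View-based overload, as in StackTerminationFailureAction.createFlowContext:
    MDCBuilder.buildMdcContext(stackView);
    // Field-by-field overload, as in AbstractClusterResetAction.createFlowContext:
    MDCBuilder.buildMdcContext(stackView.getId().toString(), stackView.getName(), stackView.getOwner(), "CLUSTER");
}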
Use of com.sequenceiq.cloudbreak.domain.view.StackView in project cloudbreak by hortonworks.
Class FlexUsageGenerator, method getFlexUsageHdpInstances.
private List<FlexUsageHdpInstanceJson> getFlexUsageHdpInstances(Iterable<CloudbreakUsage> usages) {
    Map<Long, FlexUsageHdpInstanceJson> flexUsageJsonsByStackId = new HashMap<>();
    for (CloudbreakUsage usage : usages) {
        Long stackId = usage.getStackId();
        if (!flexUsageJsonsByStackId.containsKey(stackId)) {
            FlexUsageHdpInstanceJson usageJson = new FlexUsageHdpInstanceJson();
            usageJson.setGuid(usage.getStackUuid());
            usageJson.setParentGuid(usage.getParentUuid());
            usageJson.setClusterName(usage.getStackName());
            usageJson.setBlueprintName(usage.getBlueprintName());
            usageJson.setFlexSubscriptionId(usage.getFlexId());
            usageJson.setProvider(usage.getProvider());
            usageJson.setRegion(usage.getRegion());
            usageJson.setPeakUsage(usage.getPeak());
            usageJson.setUsageDate(formatInstant(usage.getDay().toInstant(), FLEX_USAGE_DAY_FORMAT_PATTERN));
            StackView stack = stackService.getByIdView(usage.getStackId());
            usageJson.setCreationTime(formatInstant(Instant.ofEpochMilli(stack.getCreated()), FLEX_TIME_ZONE_FORMAT_PATTERN));
            usageJson.setTerminationTime(getTerminationTime(stack));
            flexUsageJsonsByStackId.put(stackId, usageJson);
        } else {
            FlexUsageHdpInstanceJson usageJson = flexUsageJsonsByStackId.get(stackId);
            Integer actPeak = usage.getPeak() != null ? usage.getPeak() : 0;
            Integer peak = usageJson.getPeakUsage() != null ? usageJson.getPeakUsage() : 0;
            int newPeak = peak + actPeak;
            usageJson.setPeakUsage(newPeak);
        }
    }
    return new ArrayList<>(flexUsageJsonsByStackId.values());
}
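The else branch adds up peak values whenever several usage rows belong to the same stack. A standalone sketch of that aggregation, reduced to plain collections (stack id to peak) purely for illustration; nothing here comes from the Cloudbreak API:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hedged sketch: per-stack peak aggregation mirroring the else branch of getFlexUsageHdpInstances.
public class PeakAggregationSketch {
    public static void main(String[] args) {
        List<long[]> usages = Arrays.asList(new long[]{1, 4}, new long[]{1, 6}, new long[]{2, 3});
        Map<Long, Integer> peakByStackId = new HashMap<>();
        for (long[] usage : usages) {
            // Same "previous peak + current peak" addition as above, without the null checks the entity needs.
            peakByStackId.merge(usage[0], (int) usage[1], Integer::sum);
        }
        System.out.println(peakByStackId); // {1=10, 2=3}: repeated rows for stack 1 are summed
    }
}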