Use of com.sequenceiq.flow.core.Flow in project cloudbreak by Hortonworks:
the restoreFailed method of class DatalakeRestoreActions.
/**
 * Spring state-machine action for the DATALAKE_RESTORE_FAILED_STATE.
 * Logs the failure, moves the datalake status back to RUNNING (the cluster itself
 * is still up even though the restore failed), notifies subscribers, marks the
 * flow as failed, bumps the failure metric, and signals that the failure has
 * been handled so the flow can finish.
 */
@Bean(name = "DATALAKE_RESTORE_FAILED_STATE")
public Action<?, ?> restoreFailed() {
    return new AbstractSdxAction<>(DatalakeRestoreFailedEvent.class) {

        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters,
                StateContext<FlowState, FlowEvent> stateContext, DatalakeRestoreFailedEvent payload) {
            return SdxContext.from(flowParameters, payload);
        }

        @Override
        protected void doExecute(SdxContext context, DatalakeRestoreFailedEvent payload, Map<Object, Object> variables) {
            Exception exception = payload.getException();
            LOGGER.error("Datalake database restore could not be started for datalake with id: {}",
                    payload.getResourceId(), exception);
            // A failed restore leaves the datalake itself operational, so the status
            // returns to RUNNING with a status reason explaining the failed restore.
            SdxCluster sdxCluster = sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.RUNNING,
                    ResourceEvent.DATALAKE_RESTORE_FINISHED,
                    "Datalake is running, Datalake restore failed", payload.getResourceId());
            // List.of(...) is null-hostile: fall back to a static text when the
            // exception carries no message, instead of failing with an NPE here.
            String failureMessage = exception.getMessage() != null ? exception.getMessage() : "Datalake restore failed";
            eventSenderService.sendEventAndNotification(sdxCluster, context.getFlowTriggerUserCrn(),
                    ResourceEvent.DATALAKE_RESTORE_FAILED, List.of(failureMessage));
            Flow flow = getFlow(context.getFlowParameters().getFlowId());
            // Reuse the local instead of re-fetching the exception from the payload.
            flow.setFlowFailed(exception);
            metricService.incrementMetricCounter(MetricType.SDX_RESTORE_FAILED, sdxCluster);
            sendEvent(context, DATALAKE_RESTORE_FAILURE_HANDLED_EVENT.event(), payload);
        }

        @Override
        protected Object getFailurePayload(DatalakeRestoreFailedEvent payload, Optional<SdxContext> flowContext, Exception ex) {
            // This is already the terminal failure handler; no further failure payload exists.
            return null;
        }
    };
}
Use of com.sequenceiq.flow.core.Flow in project cloudbreak by Hortonworks:
the failedAction method of class EnvClustersDeleteActions.
/**
 * Spring state-machine action for the ENV_CLUSTERS_DELETE_FAILED_STATE.
 * Marks the flow as failed while building the context, then persists
 * DELETE_FAILED on the environment (if it still exists), records the failure
 * metric, notifies subscribers, and signals that the failure was handled.
 */
@Bean(name = "ENV_CLUSTERS_DELETE_FAILED_STATE")
public Action<?, ?> failedAction() {
    return new AbstractEnvClustersDeleteAction<>(EnvClusterDeleteFailedEvent.class) {

        @Override
        protected void doExecute(CommonContext context, EnvClusterDeleteFailedEvent payload, Map<Object, Object> variables) {
            // Extract the exception once and reuse it everywhere below.
            Exception e = payload.getException();
            LOGGER.warn("Failed to delete environment", e);
            environmentService.findEnvironmentById(payload.getResourceId()).ifPresentOrElse(environment -> {
                // Prefer the payload's explicit message; fall back to the exception message.
                environment.setStatusReason(payload.getMessage() == null ? e.getMessage() : payload.getMessage());
                environment.setStatus(EnvironmentStatus.DELETE_FAILED);
                Environment result = environmentService.save(environment);
                EnvironmentDto environmentDto = environmentService.getEnvironmentDto(result);
                metricService.incrementMetricCounter(MetricType.ENV_CLUSTERS_DELETION_FAILED, environmentDto, e);
                eventService.sendEventAndNotification(environmentDto, context.getFlowTriggerUserCrn(),
                        ResourceEvent.ENVIRONMENT_DELETION_FAILED);
            }, () -> LOGGER.error("Cannot set delete failed to env because the environment does not exist: {}. "
                    + "But the flow will continue, how can this happen?", payload.getResourceId()));
            LOGGER.info("Flow entered into ENV_CLUSTERS_DELETE_FAILED_STATE");
            sendEvent(context, HANDLED_FAILED_ENV_CLUSTERS_DELETE_EVENT.event(), payload);
        }

        @Override
        protected CommonContext createFlowContext(FlowParameters flowParameters,
                StateContext<EnvClustersDeleteState, EnvClustersDeleteStateSelectors> stateContext,
                EnvClusterDeleteFailedEvent payload) {
            // Flag the flow as failed as early as context creation so downstream
            // state handling sees the failure.
            Flow flow = getFlow(flowParameters.getFlowId());
            flow.setFlowFailed(payload.getException());
            return new CommonContext(flowParameters);
        }
    };
}
Use of com.sequenceiq.flow.core.Flow in project cloudbreak by Hortonworks:
the failedAction method of class SdxStopActions.
/**
 * Spring state-machine action for the SDX_STOP_FAILED_STATE.
 * Marks the flow as failed, sets the datalake status to STOP_FAILED with a
 * status reason derived from the exception, prefixes the reason when the stop
 * ran as part of a datalake resize (to enable resize recovery), and signals
 * that the failure was handled.
 */
@Bean(name = "SDX_STOP_FAILED_STATE")
public Action<?, ?> failedAction() {
    return new AbstractSdxAction<>(SdxFailedEvent.class) {

        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters,
                StateContext<FlowState, FlowEvent> stateContext, SdxFailedEvent payload) {
            return SdxContext.from(flowParameters, payload);
        }

        @Override
        protected void doExecute(SdxContext context, SdxFailedEvent payload, Map<Object, Object> variables) throws Exception {
            Exception exception = payload.getException();
            DatalakeStatusEnum failedStatus = DatalakeStatusEnum.STOP_FAILED;
            LOGGER.info("Update SDX status to {} for resource: {}", failedStatus, payload.getResourceId(), exception);
            // Default reason when the exception carries no message.
            String statusReason = "SDX stop failed";
            if (exception.getMessage() != null) {
                statusReason = exception.getMessage();
            }
            Flow flow = getFlow(context.getFlowParameters().getFlowId());
            // Reuse the local instead of re-fetching the exception from the payload.
            flow.setFlowFailed(exception);
            // If this is part of DL resize, mark failure as such in order to enable proper recovery.
            Optional<FlowLog> lastFlowLog = flowLogService.getLastFlowLog(context.getFlowParameters().getFlowId());
            if (lastFlowLog.isPresent()) {
                Optional<FlowChainLog> flowChainLog =
                        flowChainLogService.findFirstByFlowChainIdOrderByCreatedDesc(lastFlowLog.get().getFlowChainId());
                if (flowChainLog.isPresent()
                        && flowChainLog.get().getFlowChainType().equals(DatalakeResizeFlowEventChainFactory.class.getSimpleName())) {
                    statusReason = "Datalake resize failure: " + statusReason;
                }
            }
            eventSenderService.notifyEvent(context, ResourceEvent.SDX_STOP_FAILED);
            sdxStatusService.setStatusForDatalakeAndNotify(failedStatus, statusReason, payload.getResourceId());
            sendEvent(context, SDX_STOP_FAILED_HANDLED_EVENT.event(), payload);
        }

        @Override
        protected Object getFailurePayload(SdxFailedEvent payload, Optional<SdxContext> flowContext, Exception ex) {
            // This is already the terminal failure handler; no further failure payload exists.
            return null;
        }
    };
}
Use of com.sequenceiq.flow.core.Flow in project cloudbreak by Hortonworks:
the failedAction method of class SdxRepairActions.
/**
 * Spring state-machine action for the SDX_REPAIR_FAILED_STATE.
 * Logs the failure, marks the flow as failed, sets the datalake status to
 * REPAIR_FAILED with a reason derived from the exception, bumps the repair
 * failure metric, and signals that the failure was handled.
 */
@Bean(name = "SDX_REPAIR_FAILED_STATE")
public Action<?, ?> failedAction() {
    return new AbstractSdxAction<>(SdxRepairFailedEvent.class) {

        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters,
                StateContext<FlowState, FlowEvent> stateContext, SdxRepairFailedEvent payload) {
            return SdxContext.from(flowParameters, payload);
        }

        @Override
        protected void doExecute(SdxContext context, SdxRepairFailedEvent payload, Map<Object, Object> variables) throws Exception {
            Exception exception = payload.getException();
            LOGGER.error("Datalake repair failed for datalakeId: {}", payload.getResourceId(), exception);
            // Default reason when the exception carries no message.
            String statusReason = "Datalake stack repair failed";
            if (exception.getMessage() != null) {
                statusReason = exception.getMessage();
            }
            Flow flow = getFlow(context.getFlowParameters().getFlowId());
            // Reuse the local instead of re-fetching the exception from the payload.
            flow.setFlowFailed(exception);
            SdxCluster sdxCluster = sdxStatusService.setStatusForDatalakeAndNotify(
                    DatalakeStatusEnum.REPAIR_FAILED, statusReason, payload.getResourceId());
            metricService.incrementMetricCounter(MetricType.SDX_REPAIR_FAILED, sdxCluster);
            sendEvent(context, SDX_REPAIR_FAILED_HANDLED_EVENT.event(), payload);
        }

        @Override
        protected Object getFailurePayload(SdxRepairFailedEvent payload, Optional<SdxContext> flowContext, Exception ex) {
            // This is already the terminal failure handler; no further failure payload exists.
            return null;
        }
    };
}
Use of com.sequenceiq.flow.core.Flow in project cloudbreak by Hortonworks:
the createFlow method of class AbstractFlowConfiguration.
/**
 * Builds a new {@link Flow} instance for the given flow identifiers: obtains a
 * fresh state machine from the factory, looks up the flow event listener bean
 * parameterized with this configuration's edge states, wires the listener into
 * the state machine, and wraps everything in a {@code FlowAdapter}.
 */
@Override
public Flow createFlow(String flowId, String flowChainId, Long stackId, String flowChainType) {
    StateMachine<S, E> stateMachine = stateMachineFactory.getStateMachine();
    // The bean lookup is untyped, hence the unchecked cast to the generic listener type.
    FlowEventListener<S, E> flowEventListener = (FlowEventListener<S, E>) applicationContext.getBean(
            FlowEventListener.class,
            getEdgeConfig().initState,
            getEdgeConfig().finalState,
            flowChainType,
            getClass().getSimpleName(),
            flowChainId,
            flowId,
            stackId);
    Flow flow = new FlowAdapter<>(
            flowId,
            stateMachine,
            new MessageFactory<>(),
            new StateConverterAdapter<>(stateType),
            new EventConverterAdapter<>(eventType),
            (Class<? extends FlowConfiguration<E>>) getClass(),
            flowEventListener);
    stateMachine.addStateListener(flowEventListener);
    return flow;
}
Aggregations