Use of com.sequenceiq.datalake.entity.DatalakeStatusEnum in project cloudbreak by hortonworks.
The class StartDatahubActions, method failedAction:
@Bean(name = "START_DATAHUB_FAILED_STATE")
public Action<?, ?> failedAction() {
    return new AbstractSdxAction<>(StartDatahubFailedEvent.class) {

        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, StartDatahubFailedEvent payload) {
            return SdxContext.from(flowParameters, payload);
        }

        @Override
        protected void doExecute(SdxContext context, StartDatahubFailedEvent payload, Map<Object, Object> variables) throws Exception {
            Exception exception = payload.getException();
            DatalakeStatusEnum failedStatus = DatalakeStatusEnum.START_FAILED;
            LOGGER.info("Update SDX status to {} for resource: {}", failedStatus, payload.getResourceId(), exception);
            String statusReason = "Datahub start failed";
            if (exception.getMessage() != null) {
                statusReason = exception.getMessage();
            }
            Flow flow = getFlow(context.getFlowParameters().getFlowId());
            flow.setFlowFailed(payload.getException());
            sdxStatusService.setStatusForDatalakeAndNotify(failedStatus, statusReason, payload.getResourceId());
            sendEvent(context, START_DATAHUB_HANDLED_EVENT.event(), payload);
        }

        @Override
        protected Object getFailurePayload(StartDatahubFailedEvent payload, Optional<SdxContext> flowContext, Exception ex) {
            return null;
        }
    };
}
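The detail worth noting is the statusReason fallback: the action records a generic "Datahub start failed" reason unless the triggering exception carries its own message. Below is a minimal standalone sketch of that pattern; the FailedEvent record and resolveStatusReason helper are illustrative stand-ins, not part of the cloudbreak API.

import java.util.Optional;

public class StatusReasonFallbackSketch {

    // Hypothetical stand-in for a failure payload carrying the causing exception.
    record FailedEvent(Long resourceId, Exception exception) { }

    // Prefer the exception's own message, falling back to a fixed default,
    // mirroring the doExecute logic above.
    static String resolveStatusReason(FailedEvent payload, String defaultReason) {
        return Optional.ofNullable(payload.exception().getMessage()).orElse(defaultReason);
    }

    public static void main(String[] args) {
        FailedEvent withMessage = new FailedEvent(1L, new IllegalStateException("CM not reachable"));
        FailedEvent withoutMessage = new FailedEvent(2L, new RuntimeException());
        System.out.println(resolveStatusReason(withMessage, "Datahub start failed"));    // prints the exception message
        System.out.println(resolveStatusReason(withoutMessage, "Datahub start failed")); // prints the default reason
    }
}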
The class SdxClusterStatusCheckerJob, method updateToNodeFailure:
private DatalakeStatusEnum updateToNodeFailure(StackStatusV4Response stackStatus, SdxCluster sdx, SdxStatusEntity sdxStatus) {
    DatalakeStatusEnum resultStatus = DatalakeStatusEnum.NODE_FAILURE;
    if (!resultStatus.equals(sdxStatus.getStatus())) {
        String statusReason = stackStatus.getStatus() == Status.NODE_FAILURE ? stackStatus.getStatusReason() : stackStatus.getClusterStatusReason();
        sdxStatusService.setStatusForDatalakeAndNotify(resultStatus, ResourceEvent.CLUSTER_AMBARI_CLUSTER_SYNCHRONIZED, Collections.singleton(sdx.getClusterName()), statusReason, sdx);
    }
    return resultStatus;
}
The class SdxClusterStatusCheckerJob, method updateToUnreachable:
private DatalakeStatusEnum updateToUnreachable(StackStatusV4Response stackStatus, SdxCluster sdx, SdxStatusEntity sdxStatus) {
    DatalakeStatusEnum resultStatus = DatalakeStatusEnum.CLUSTER_UNREACHABLE;
    if (!resultStatus.equals(sdxStatus.getStatus())) {
        String statusReason = stackStatus.getStatus() == Status.UNREACHABLE ? stackStatus.getStatusReason() : stackStatus.getClusterStatusReason();
        sdxStatusService.setStatusForDatalakeAndNotify(resultStatus, ResourceEvent.CLUSTER_AMBARI_CLUSTER_SYNCHRONIZED, Collections.singleton(sdx.getClusterName()), statusReason, sdx);
    }
    return resultStatus;
}
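updateToNodeFailure and updateToUnreachable share the same shape: only persist (and notify about) the new status when it differs from the currently stored one, and take the reason from the stack status when the stack itself reports the problem, otherwise from the cluster status. The self-contained sketch below captures that guard-and-pick pattern with simplified stand-in types, not the real cloudbreak classes.

import java.util.Objects;

public class StatusTransitionSketch {

    enum DatalakeStatus { RUNNING, NODE_FAILURE, CLUSTER_UNREACHABLE }

    // Simplified stand-in for StackStatusV4Response: stack-level and
    // cluster-level statuses can differ and carry separate reasons.
    record StackStatus(DatalakeStatus stackStatus, String stackReason,
                       DatalakeStatus clusterStatus, String clusterReason) { }

    // Returns the target status, emitting an update only when it changes,
    // mirroring the guard in the checker-job methods above.
    static DatalakeStatus updateTo(DatalakeStatus target, DatalakeStatus current, StackStatus stack) {
        if (!Objects.equals(target, current)) {
            String reason = stack.stackStatus() == target ? stack.stackReason() : stack.clusterReason();
            System.out.printf("transition %s -> %s (%s)%n", current, target, reason);
        }
        return target;
    }

    public static void main(String[] args) {
        StackStatus stack = new StackStatus(DatalakeStatus.NODE_FAILURE, "node cb-1 lost",
                DatalakeStatus.RUNNING, "cluster healthy");
        updateTo(DatalakeStatus.NODE_FAILURE, DatalakeStatus.RUNNING, stack);      // prints a transition
        updateTo(DatalakeStatus.NODE_FAILURE, DatalakeStatus.NODE_FAILURE, stack); // no-op, already failed
    }
}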
The class SdxStopActions, method failedAction:
@Bean(name = "SDX_STOP_FAILED_STATE")
public Action<?, ?> failedAction() {
    return new AbstractSdxAction<>(SdxFailedEvent.class) {

        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, SdxFailedEvent payload) {
            return SdxContext.from(flowParameters, payload);
        }

        @Override
        protected void doExecute(SdxContext context, SdxFailedEvent payload, Map<Object, Object> variables) throws Exception {
            Exception exception = payload.getException();
            DatalakeStatusEnum failedStatus = DatalakeStatusEnum.STOP_FAILED;
            LOGGER.info("Update SDX status to {} for resource: {}", failedStatus, payload.getResourceId(), exception);
            String statusReason = "SDX stop failed";
            if (exception.getMessage() != null) {
                statusReason = exception.getMessage();
            }
            Flow flow = getFlow(context.getFlowParameters().getFlowId());
            flow.setFlowFailed(payload.getException());
            // If this is part of DL resize, mark failure as such in order to enable proper recovery.
            Optional<FlowLog> lastFlowLog = flowLogService.getLastFlowLog(context.getFlowParameters().getFlowId());
            if (lastFlowLog.isPresent()) {
                Optional<FlowChainLog> flowChainLog = flowChainLogService.findFirstByFlowChainIdOrderByCreatedDesc(lastFlowLog.get().getFlowChainId());
                if (flowChainLog.isPresent() && flowChainLog.get().getFlowChainType().equals(DatalakeResizeFlowEventChainFactory.class.getSimpleName())) {
                    statusReason = "Datalake resize failure: " + statusReason;
                }
            }
            eventSenderService.notifyEvent(context, ResourceEvent.SDX_STOP_FAILED);
            sdxStatusService.setStatusForDatalakeAndNotify(failedStatus, statusReason, payload.getResourceId());
            sendEvent(context, SDX_STOP_FAILED_HANDLED_EVENT.event(), payload);
        }

        @Override
        protected Object getFailurePayload(SdxFailedEvent payload, Optional<SdxContext> flowContext, Exception ex) {
            return null;
        }
    };
}
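The extra step compared to the datahub action is the flow-chain check: when the failed stop belongs to a datalake resize chain, the status reason gets a "Datalake resize failure: " prefix so recovery logic can recognise it later. Here is a hedged, standalone sketch of just that decision; the chain lookup is faked with an Optional, whereas the real code goes through flowLogService and flowChainLogService.

import java.util.Optional;

public class ResizeFailurePrefixSketch {

    // Marker matching what the real code compares against: the simple name
    // of the resize flow-event-chain factory.
    private static final String RESIZE_CHAIN_TYPE = "DatalakeResizeFlowEventChainFactory";

    // Prefix the reason only when the failure happened inside a resize chain.
    static String decorateStatusReason(String statusReason, Optional<String> flowChainType) {
        if (flowChainType.filter(RESIZE_CHAIN_TYPE::equals).isPresent()) {
            return "Datalake resize failure: " + statusReason;
        }
        return statusReason;
    }

    public static void main(String[] args) {
        System.out.println(decorateStatusReason("SDX stop failed", Optional.of(RESIZE_CHAIN_TYPE)));
        System.out.println(decorateStatusReason("SDX stop failed", Optional.empty()));
    }
}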
The class ResizeRecoveryService, method validateRecovery:
@Override
public SdxRecoverableResponse validateRecovery(SdxCluster sdxCluster, SdxRecoveryRequest request) {
    if (!entitlementService.isDatalakeResizeRecoveryEnabled(ThreadBasedUserCrnProvider.getAccountId())) {
        return new SdxRecoverableResponse("Resize Recovery entitlement not enabled", RecoveryStatus.NON_RECOVERABLE);
    }
    SdxStatusEntity actualStatusForSdx = sdxStatusService.getActualStatusForSdx(sdxCluster);
    DatalakeStatusEnum status = actualStatusForSdx.getStatus();
    String statusReason = actualStatusForSdx.getStatusReason();
    if (getOldCluster(sdxCluster).isPresent()) {
        return validateRecoveryResizedClusterPresent(sdxCluster, status, statusReason);
    }
    return validateRecoveryOnlyOriginalCluster(sdxCluster, status, statusReason);
}
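Conceptually this is an entitlement gate followed by a branch on whether the pre-resize ("old") cluster still exists. The sketch below captures only that control flow with plain stand-in types; it is not the cloudbreak API, and the validate* helpers are hypothetical placeholders for the service's private validators.

import java.util.Optional;

public class RecoveryValidationSketch {

    enum RecoveryStatus { RECOVERABLE, NON_RECOVERABLE }

    record RecoverableResponse(String reason, RecoveryStatus status) { }

    static RecoverableResponse validateRecovery(boolean entitlementEnabled,
                                                String status,
                                                String statusReason,
                                                Optional<String> oldClusterCrn) {
        if (!entitlementEnabled) {
            // Without the entitlement there is nothing to evaluate.
            return new RecoverableResponse("Resize Recovery entitlement not enabled", RecoveryStatus.NON_RECOVERABLE);
        }
        // Branch on whether a pre-resize cluster is still present.
        return oldClusterCrn.isPresent()
                ? validateWithResizedClusterPresent(status, statusReason)
                : validateOnlyOriginalCluster(status, statusReason);
    }

    // Hypothetical helpers; the real service inspects status and statusReason here.
    static RecoverableResponse validateWithResizedClusterPresent(String status, String statusReason) {
        return new RecoverableResponse("Old cluster still present, status " + status, RecoveryStatus.RECOVERABLE);
    }

    static RecoverableResponse validateOnlyOriginalCluster(String status, String statusReason) {
        return new RecoverableResponse("Only original cluster, status " + status, RecoveryStatus.RECOVERABLE);
    }

    public static void main(String[] args) {
        System.out.println(validateRecovery(false, "STOP_FAILED", "stop failed", Optional.empty()).reason());
        System.out.println(validateRecovery(true, "STOP_FAILED", "stop failed", Optional.of("crn:old")).reason());
    }
}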