Use of com.sequenceiq.datalake.service.AbstractSdxAction in project cloudbreak by hortonworks: class DatalakeBackupActions, method datalakeBackupInProgress.
@Bean(name = "DATALAKE_DATABASE_BACKUP_IN_PROGRESS_STATE")
public Action<?, ?> datalakeBackupInProgress() {
return new AbstractSdxAction<>(SdxEvent.class) {
@Override
protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, SdxEvent payload) {
return SdxContext.from(flowParameters, payload);
}
@Override
protected void doExecute(SdxContext context, SdxEvent payload, Map<Object, Object> variables) {
LOGGER.info("Datalake database backup is in progress for {} ", payload.getResourceId());
String operationId = (String) variables.get(OPERATION_ID);
sdxBackupRestoreService.updateDatabaseStatusEntry(operationId, SdxOperationStatus.INPROGRESS, null);
SdxCluster sdxCluster = sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.DATALAKE_BACKUP_INPROGRESS, ResourceEvent.DATALAKE_BACKUP_IN_PROGRESS, "Datalake backup in progress", payload.getResourceId());
metricService.incrementMetricCounter(MetricType.SDX_BACKUP_REQUESTED, sdxCluster);
sendEvent(context, DatalakeDatabaseBackupWaitRequest.from(context, operationId));
}
@Override
protected Object getFailurePayload(SdxEvent payload, Optional<SdxContext> flowContext, Exception ex) {
return DatalakeDatabaseBackupFailedEvent.from(payload, ex);
}
};
}
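Every bean on this page follows the same AbstractSdxAction contract: createFlowContext builds an SdxContext from the incoming payload, doExecute performs the state's work and emits the event that moves the flow forward, and getFailurePayload maps an exception to a failure payload (or returns null when no dedicated failure path exists). A minimal skeleton of that contract, using only the types visible in these snippets, might look like the sketch below; EXAMPLE_STATE and NEXT_EVENT are placeholders, not real Cloudbreak identifiers.

@Bean(name = "EXAMPLE_STATE")
public Action<?, ?> exampleAction() {
    return new AbstractSdxAction<>(SdxEvent.class) {

        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, SdxEvent payload) {
            // Turn the generic payload into the flow context that doExecute will use.
            return SdxContext.from(flowParameters, payload);
        }

        @Override
        protected void doExecute(SdxContext context, SdxEvent payload, Map<Object, Object> variables) {
            // Perform this state's work, then hand control to the next state.
            sendEvent(context, NEXT_EVENT.event(), payload);
        }

        @Override
        protected Object getFailurePayload(SdxEvent payload, Optional<SdxContext> flowContext, Exception ex) {
            // Map the exception to a failure event, or return null when failures are handled elsewhere.
            return null;
        }
    };
}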
Use of com.sequenceiq.datalake.service.AbstractSdxAction in project cloudbreak by hortonworks: class DatalakeBackupActions, method triggerDatalakeBackup.
@Bean(name = "DATALAKE_TRIGGERING_BACKUP_STATE")
public Action<?, ?> triggerDatalakeBackup() {
return new AbstractSdxAction<>(DatalakeTriggerBackupEvent.class) {
@Override
protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, DatalakeTriggerBackupEvent payload) {
return SdxContext.from(flowParameters, payload);
}
@Override
protected void prepareExecution(DatalakeTriggerBackupEvent payload, Map<Object, Object> variables) {
variables.put(OPERATION_ID, payload.getDrStatus().getOperationId());
variables.put(REASON, payload.getReason().name());
super.prepareExecution(payload, variables);
}
@Override
protected void doExecute(SdxContext context, DatalakeTriggerBackupEvent payload, Map<Object, Object> variables) {
LOGGER.info("Triggering data lake backup for {}", payload.getResourceId());
SdxCluster sdxCluster = sdxService.getById(payload.getResourceId());
eventSenderService.sendEventAndNotification(sdxCluster, context.getFlowTriggerUserCrn(), ResourceEvent.DATALAKE_BACKUP_IN_PROGRESS);
DatalakeBackupStatusResponse backupStatusResponse = sdxBackupRestoreService.triggerDatalakeBackup(payload.getResourceId(), payload.getBackupLocation(), payload.getBackupName(), payload.getUserId());
variables.put(BACKUP_ID, backupStatusResponse.getBackupId());
variables.put(OPERATION_ID, backupStatusResponse.getBackupId());
payload.getDrStatus().setOperationId(backupStatusResponse.getBackupId());
if (!backupStatusResponse.failed()) {
sendEvent(context, DatalakeDatabaseBackupStartEvent.from(payload, backupStatusResponse.getBackupId()));
} else {
sendEvent(context, DATALAKE_BACKUP_FAILED_EVENT.event(), payload);
}
}
@Override
protected Object getFailurePayload(DatalakeTriggerBackupEvent payload, Optional<SdxContext> flowContext, Exception ex) {
return DatalakeBackupFailedEvent.from(payload, ex);
}
};
}
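Note how the shared variables map carries state between callbacks: prepareExecution seeds OPERATION_ID from the DR status, and doExecute then overwrites it (and sets BACKUP_ID) with the backup id returned by triggerDatalakeBackup, which is the value the DATALAKE_DATABASE_BACKUP_IN_PROGRESS_STATE action above reads back. The plain-Java sketch below condenses that handoff outside the flow framework; the key string and the example values are illustrative only.

import java.util.HashMap;
import java.util.Map;

public class OperationIdHandoffSketch {

    // Illustrative key; the real OPERATION_ID constant is defined in the actions class.
    private static final String OPERATION_ID = "OPERATION-ID";

    public static void main(String[] args) {
        Map<Object, Object> variables = new HashMap<>();
        variables.put(OPERATION_ID, "dr-status-operation-id");      // prepareExecution: seeded from the DR status
        String backupId = "backup-42";                               // doExecute: id returned by triggerDatalakeBackup (example value)
        variables.put(OPERATION_ID, backupId);                       // doExecute: overwrites the seeded value
        String operationId = (String) variables.get(OPERATION_ID);   // read later by the backup-in-progress action
        System.out.println("operationId seen downstream: " + operationId);
    }
}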
Use of com.sequenceiq.datalake.service.AbstractSdxAction in project cloudbreak by hortonworks: class SdxCmDiagnosticsActions, method startCmDiagnosticsCollection.
@Bean(name = "CM_DIAGNOSTICS_COLLECTION_START_STATE")
public Action<?, ?> startCmDiagnosticsCollection() {
return new AbstractSdxAction<>(SdxCmDiagnosticsCollectionEvent.class) {
@Override
protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, SdxCmDiagnosticsCollectionEvent payload) {
return SdxContext.from(flowParameters, payload);
}
@Override
protected void doExecute(SdxContext context, SdxCmDiagnosticsCollectionEvent payload, Map<Object, Object> variables) {
LOGGER.debug("Start CM based diagnostics collection for sdx cluster with id: {}", context.getSdxId());
FlowIdentifier flowIdentifier = diagnosticsFlowService.startCmDiagnosticsCollection(payload.getProperties());
SdxCmDiagnosticsCollectionEvent event = new SdxCmDiagnosticsCollectionEvent(payload.getResourceId(), payload.getUserId(), payload.getProperties(), flowIdentifier);
sendEvent(context, SDX_CM_DIAGNOSTICS_COLLECTION_IN_PROGRESS_EVENT.event(), event);
}
@Override
protected Object getFailurePayload(SdxCmDiagnosticsCollectionEvent payload, Optional<SdxContext> flowContext, Exception ex) {
return SdxCmDiagnosticsFailedEvent.from(payload, ex);
}
};
}
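The diagnostics action shows another recurring shape: trigger the underlying work, capture the returned FlowIdentifier, and re-emit the same event type enriched with that identifier so the next state can track the spawned flow. The sketch below models only that enrichment step, using simplified stand-in types rather than the real Cloudbreak classes.

import java.util.Map;

// Simplified stand-ins for FlowIdentifier and SdxCmDiagnosticsCollectionEvent; not the real Cloudbreak types.
record FlowId(String value) { }

record DiagnosticsEvent(Long resourceId, String userId, Map<String, Object> properties, FlowId flowId) {

    // Copy the incoming payload and attach the identifier of the flow that was just started,
    // mirroring the "new SdxCmDiagnosticsCollectionEvent(..., flowIdentifier)" call above.
    DiagnosticsEvent withFlowId(FlowId startedFlowId) {
        return new DiagnosticsEvent(resourceId, userId, properties, startedFlowId);
    }
}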
Use of com.sequenceiq.datalake.service.AbstractSdxAction in project cloudbreak by hortonworks: class SdxStopActions, method failedAction.
@Bean(name = "SDX_STOP_FAILED_STATE")
public Action<?, ?> failedAction() {
return new AbstractSdxAction<>(SdxFailedEvent.class) {
@Override
protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, SdxFailedEvent payload) {
return SdxContext.from(flowParameters, payload);
}
@Override
protected void doExecute(SdxContext context, SdxFailedEvent payload, Map<Object, Object> variables) throws Exception {
Exception exception = payload.getException();
DatalakeStatusEnum failedStatus = DatalakeStatusEnum.STOP_FAILED;
LOGGER.info("Update SDX status to {} for resource: {}", failedStatus, payload.getResourceId(), exception);
String statusReason = "SDX stop failed";
if (exception.getMessage() != null) {
statusReason = exception.getMessage();
}
Flow flow = getFlow(context.getFlowParameters().getFlowId());
flow.setFlowFailed(payload.getException());
// If this is part of DL resize, mark failure as such in order to enable proper recovery.
Optional<FlowLog> lastFlowLog = flowLogService.getLastFlowLog(context.getFlowParameters().getFlowId());
if (lastFlowLog.isPresent()) {
Optional<FlowChainLog> flowChainLog = flowChainLogService.findFirstByFlowChainIdOrderByCreatedDesc(lastFlowLog.get().getFlowChainId());
if (flowChainLog.isPresent() && flowChainLog.get().getFlowChainType().equals(DatalakeResizeFlowEventChainFactory.class.getSimpleName())) {
statusReason = "Datalake resize failure: " + statusReason;
}
}
eventSenderService.notifyEvent(context, ResourceEvent.SDX_STOP_FAILED);
sdxStatusService.setStatusForDatalakeAndNotify(failedStatus, statusReason, payload.getResourceId());
sendEvent(context, SDX_STOP_FAILED_HANDLED_EVENT.event(), payload);
}
@Override
protected Object getFailurePayload(SdxFailedEvent payload, Optional<SdxContext> flowContext, Exception ex) {
return null;
}
};
}
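The interesting logic in this failure handler is how the status reason is derived: start from a generic message, prefer the exception's message when one is present, and prefix it when the last flow-chain log shows the stop ran as part of a datalake resize. That decision can be isolated as a small pure method, sketched below under the assumption that the caller has already resolved whether the resize chain is involved.

final class StopFailureReasonSketch {

    // Pure-logic sketch of the statusReason derivation above; partOfResizeChain stands in for the
    // FlowChainLog / DatalakeResizeFlowEventChainFactory check, which the caller is assumed to resolve.
    static String stopFailureReason(Exception exception, boolean partOfResizeChain) {
        String statusReason = "SDX stop failed";
        if (exception.getMessage() != null) {
            statusReason = exception.getMessage();
        }
        if (partOfResizeChain) {
            statusReason = "Datalake resize failure: " + statusReason;
        }
        return statusReason;
    }
}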
Use of com.sequenceiq.datalake.service.AbstractSdxAction in project cloudbreak by hortonworks: class SdxStartActions, method finishedAction.
@Bean(name = "SDX_START_FINISHED_STATE")
public Action<?, ?> finishedAction() {
return new AbstractSdxAction<>(SdxStartSuccessEvent.class) {
@Override
protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, SdxStartSuccessEvent payload) {
return SdxContext.from(flowParameters, payload);
}
@Override
protected void doExecute(SdxContext context, SdxStartSuccessEvent payload, Map<Object, Object> variables) throws Exception {
LOGGER.info("SDX start finalized: {}", payload.getResourceId());
SdxCluster sdxCluster = sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.RUNNING, "Datalake is running", payload.getResourceId());
metricService.incrementMetricCounter(MetricType.SDX_START_FINISHED, sdxCluster);
sendEvent(context, SDX_START_FINALIZED_EVENT.event(), payload);
}
@Override
protected Object getFailurePayload(SdxStartSuccessEvent payload, Optional<SdxContext> flowContext, Exception ex) {
return null;
}
};
}
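Across these examples the @Bean names appear to double as the flow-state identifiers the actions are registered under, and terminal success states such as this one return null from getFailurePayload because there is no further failure transition to take. Collecting the state names seen on this page into one enum, purely to illustrate that naming convention (Cloudbreak defines them in its own per-flow state enums, not in a single enum like this), gives:

// Illustrative only: the state names taken verbatim from the snippets above.
enum ExampleStateNames {
    DATALAKE_TRIGGERING_BACKUP_STATE,
    DATALAKE_DATABASE_BACKUP_IN_PROGRESS_STATE,
    CM_DIAGNOSTICS_COLLECTION_START_STATE,
    SDX_STOP_FAILED_STATE,
    SDX_START_FINISHED_STATE
}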