Use of com.sequenceiq.datalake.flow.dr.restore.event.DatalakeRestoreFailedEvent in project cloudbreak by hortonworks: the class DatalakeFullRestoreWaitHandler, method doAccept. The handler polls the status of a full data lake restore and maps each outcome to a flow event, returning a DatalakeRestoreSuccessEvent on completion and a DatalakeRestoreFailedEvent carrying the triggering exception on every failure path.
@Override
protected Selectable doAccept(HandlerEvent<DatalakeFullRestoreWaitRequest> event) {
    DatalakeFullRestoreWaitRequest request = event.getData();
    Long sdxId = request.getResourceId();
    String userId = request.getUserId();
    Selectable response;
    try {
        LOGGER.info("Start polling datalake full restore status for id: {}", sdxId);
        PollingConfig pollingConfig = new PollingConfig(sleepTimeInSec, TimeUnit.SECONDS, durationInMinutes, TimeUnit.MINUTES);
        sdxBackupRestoreService.waitForDatalakeDrRestoreToComplete(sdxId, request.getOperationId(), request.getUserId(), pollingConfig, "Full restore");
        response = new DatalakeRestoreSuccessEvent(sdxId, userId, request.getOperationId());
    } catch (UserBreakException userBreakException) {
        LOGGER.info("Full restore polling exited before timeout. Cause: ", userBreakException);
        response = new DatalakeRestoreFailedEvent(sdxId, userId, userBreakException);
    } catch (PollerStoppedException pollerStoppedException) {
        LOGGER.info("Full restore poller stopped for cluster: {}", sdxId);
        response = new DatalakeRestoreFailedEvent(sdxId, userId,
                new PollerStoppedException("Data lake restore timed out after " + durationInMinutes + " minutes"));
    } catch (PollerException exception) {
        LOGGER.info("Full restore polling failed for cluster: {}", sdxId);
        response = new DatalakeRestoreFailedEvent(sdxId, userId, exception);
    } catch (CloudbreakApiException exception) {
        LOGGER.info("Datalake restore failed. Reason: " + exception.getMessage());
        response = new DatalakeRestoreFailedEvent(sdxId, userId, exception);
    }
    return response;
}
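For orientation, here is a minimal, hypothetical sketch of the event's shape, inferred only from what these snippets use: the three-argument constructor plus getResourceId() and getException(). The real class in com.sequenceiq.datalake.flow.dr.restore.event belongs to cloudbreak's SDX flow event hierarchy and may carry additional fields such as an event selector; this standalone version is for illustration only and is deliberately named as a sketch.

    // Hypothetical, standalone approximation of DatalakeRestoreFailedEvent,
    // reconstructed from the constructor and getters used in the snippets on this page.
    // The real cloudbreak class extends the project's SDX event types.
    public class DatalakeRestoreFailedEventSketch {
        private final Long resourceId;
        private final String userId;
        private final Exception exception;

        public DatalakeRestoreFailedEventSketch(Long resourceId, String userId, Exception exception) {
            this.resourceId = resourceId;
            this.userId = userId;
            this.exception = exception;
        }

        public Long getResourceId() {
            return resourceId;
        }

        public String getUserId() {
            return userId;
        }

        // The restoreFailed() action below reads this to log the failure
        // and to mark the flow as failed.
        public Exception getException() {
            return exception;
        }
    }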
Use of com.sequenceiq.datalake.flow.dr.restore.event.DatalakeRestoreFailedEvent in project cloudbreak by hortonworks: the class DatalakeRestoreActions, method restoreFailed. This Spring bean defines the DATALAKE_RESTORE_FAILED_STATE action that consumes the failed event: it logs the exception, resets the data lake status, sends a failure notification, marks the flow as failed, increments the restore-failure metric, and emits the failure-handled event.
@Bean(name = "DATALAKE_RESTORE_FAILED_STATE")
public Action<?, ?> restoreFailed() {
    return new AbstractSdxAction<>(DatalakeRestoreFailedEvent.class) {

        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext,
                DatalakeRestoreFailedEvent payload) {
            return SdxContext.from(flowParameters, payload);
        }

        @Override
        protected void doExecute(SdxContext context, DatalakeRestoreFailedEvent payload, Map<Object, Object> variables) {
            Exception exception = payload.getException();
            LOGGER.error("Datalake database restore could not be started for datalake with id: {}", payload.getResourceId(), exception);
            SdxCluster sdxCluster = sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.RUNNING,
                    ResourceEvent.DATALAKE_RESTORE_FINISHED, "Datalake is running, Datalake restore failed", payload.getResourceId());
            eventSenderService.sendEventAndNotification(sdxCluster, context.getFlowTriggerUserCrn(),
                    ResourceEvent.DATALAKE_RESTORE_FAILED, List.of(exception.getMessage()));
            Flow flow = getFlow(context.getFlowParameters().getFlowId());
            flow.setFlowFailed(payload.getException());
            metricService.incrementMetricCounter(MetricType.SDX_RESTORE_FAILED, sdxCluster);
            sendEvent(context, DATALAKE_RESTORE_FAILURE_HANDLED_EVENT.event(), payload);
        }

        @Override
        protected Object getFailurePayload(DatalakeRestoreFailedEvent payload, Optional<SdxContext> flowContext, Exception ex) {
            return null;
        }
    };
}
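To make the handler-to-action handoff concrete, below is a small, hypothetical usage sketch built only on the standalone event approximation above: it mimics the timeout branch of doAccept (wrapping the timeout reason in an exception) and then reads back exactly the fields that restoreFailed() consumes. The class name DatalakeRestoreFailureExample, the IllegalStateException stand-in, and the CRN literal are illustrative and not part of cloudbreak.

    // Hypothetical usage sketch: the wait handler's timeout branch produces a failed
    // event, and the failed-state action later reads the resource id and exception.
    public class DatalakeRestoreFailureExample {
        public static void main(String[] args) {
            long durationInMinutes = 90;
            Exception timeout = new IllegalStateException(
                    "Data lake restore timed out after " + durationInMinutes + " minutes");

            DatalakeRestoreFailedEventSketch event =
                    new DatalakeRestoreFailedEventSketch(1L, "crn:altus:iam:us-west-1:example:user:admin", timeout);

            // These are the accessors the DATALAKE_RESTORE_FAILED_STATE action relies on
            // before notifying and marking the flow as failed.
            System.out.println("resourceId = " + event.getResourceId());
            System.out.println("reason     = " + event.getException().getMessage());
        }
    }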