Usage of com.sequenceiq.datalake.flow.detach.event.SdxStartDetachEvent in the cloudbreak project by Hortonworks.
From the class SdxDetachActions, method sdxDetachCluster().
@Bean(name = "SDX_DETACH_CLUSTER_STATE")
public Action<?, ?> sdxDetachCluster() {
    return new AbstractSdxAction<>(SdxStartDetachEvent.class) {

        /** Builds the SDX flow context from the flow trigger parameters and the incoming payload. */
        @Override
        protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, SdxStartDetachEvent payload) {
            return SdxContext.from(flowParameters, payload);
        }

        /**
         * Detaches the SDX cluster identified by the payload, stores the detached and resized
         * clusters (plus the recovery flag) in the flow variables, sends a user-facing
         * notification, and signals success to the state machine.
         */
        @Override
        protected void doExecute(SdxContext context, SdxStartDetachEvent payload, Map<Object, Object> variables) {
            LOGGER.info("Detaching of SDX with ID {} in progress.", payload.getResourceId());
            SdxCluster detachedCluster = sdxDetachService.detachCluster(payload.getResourceId(), payload.isDetachDuringRecovery());
            // These three variables are independent; later flow states read them back by key.
            variables.put(DETACHED_SDX, detachedCluster);
            variables.put(RESIZED_SDX, payload.getSdxCluster());
            variables.put(IS_DETACH_DURING_RECOVERY, payload.isDetachDuringRecovery());
            eventSenderService.sendEventAndNotification(detachedCluster, context.getFlowTriggerUserCrn(),
                    ResourceEvent.SDX_DETACH_STARTED, List.of(detachedCluster.getClusterName()));
            sendEvent(context, SDX_DETACH_CLUSTER_SUCCESS_EVENT.event(), payload);
        }

        /** Wraps any exception thrown during detach into the failure event consumed by the failed state. */
        @Override
        protected Object getFailurePayload(SdxStartDetachEvent payload, Optional<SdxContext> flowContext, Exception e) {
            return SdxDetachFailedEvent.from(payload, e);
        }
    };
}
Usage of com.sequenceiq.datalake.flow.detach.event.SdxStartDetachEvent in the cloudbreak project by Hortonworks.
From the class DatalakeResizeFlowEventChainFactory, method createFlowTriggerEventQueue().
@Override
public FlowTriggerEventQueue createFlowTriggerEventQueue(DatalakeResizeFlowChainStartEvent event) {
    // Ordered chain of sub-flows that together perform a datalake resize.
    Queue<Selectable> flowChain = new ConcurrentLinkedQueue<>();
    if (event.shouldTakeBackup()) {
        // Back up first; the accepted() promise is attached to the very first event of the chain.
        flowChain.add(new DatalakeTriggerBackupEvent(DATALAKE_TRIGGER_BACKUP_EVENT.event(),
                event.getResourceId(), event.getUserId(), event.getBackupLocation(),
                "resize" + System.currentTimeMillis(), DatalakeBackupFailureReason.BACKUP_ON_RESIZE, event.accepted()));
        // Then stop the datalake (and its attached datahubs).
        flowChain.add(new SdxStartStopEvent(SDX_STOP_EVENT.event(), event.getResourceId(), event.getUserId(), STOP_DATAHUBS));
    } else {
        // No backup requested: the stop event is the chain head, so it carries the accepted() promise.
        flowChain.add(new SdxStartStopEvent(SDX_STOP_EVENT.event(), event.getResourceId(), event.getUserId(), STOP_DATAHUBS, event.accepted()));
    }
    // Detach the existing SDX from its environment.
    flowChain.add(new SdxStartDetachEvent(SDX_DETACH_EVENT.event(), event.getResourceId(), event.getSdxCluster(), event.getUserId()));
    // Validate and create the new (resized) cluster.
    flowChain.add(new SdxEvent(SDX_VALIDATION_EVENT.event(), event.getResourceId(), event.getSdxCluster().getClusterName(), event.getUserId()));
    if (event.shouldPerformRestore()) {
        // Restore the backup onto the freshly created cluster.
        flowChain.add(new DatalakeTriggerRestoreEvent(DATALAKE_TRIGGER_RESTORE_EVENT.event(),
                event.getResourceId(), event.getSdxCluster().getClusterName(), event.getUserId(),
                null, event.getBackupLocation(), null, DatalakeRestoreFailureReason.RESTORE_ON_RESIZE));
    }
    // Finally delete the detached old SDX and refresh the attached datahubs.
    flowChain.add(new SdxDeleteStartEvent(SDX_DELETE_EVENT.event(), event.getResourceId(), event.getUserId(), true));
    flowChain.add(new DatahubRefreshStartEvent(event.getResourceId(), event.getSdxCluster().getClusterName(), event.getUserId()));
    return new FlowTriggerEventQueue(getName(), event, flowChain);
}
Usage of com.sequenceiq.datalake.flow.detach.event.SdxStartDetachEvent in the cloudbreak project by Hortonworks.
From the class DatalakeResizeRecoveryFlowEventChainFactory, method createDetachEventForNewCluster().
/**
 * Builds the detach trigger event used by the resize-recovery flow chain to detach
 * the newly created (failed) cluster from the environment.
 *
 * <p>Only the cluster name is copied onto a throwaway {@link SdxCluster} instance,
 * since that is all the detach flow needs from the payload here.
 *
 * @param event the resize-recovery chain start event carrying the new cluster and user
 * @return a detach start event flagged as happening during recovery
 */
private SdxStartDetachEvent createDetachEventForNewCluster(DatalakeResizeRecoveryFlowChainStartEvent event) {
    SdxCluster fauxCluster = new SdxCluster();
    fauxCluster.setClusterName(event.getNewCluster().getClusterName());
    // Use SLF4J parameterized logging (matches the rest of the codebase) instead of string concatenation.
    LOGGER.info("Generated a {} for the datalake resize recovery flow chain.", SDX_DETACH_EVENT.event());
    SdxStartDetachEvent startDetachEvent = new SdxStartDetachEvent(SDX_DETACH_EVENT.event(),
            event.getNewCluster().getId(), fauxCluster, event.getUserId(), event.accepted());
    // Mark the detach as part of a recovery so downstream states skip resize-specific steps.
    startDetachEvent.setDetachDuringRecovery(true);
    return startDetachEvent;
}
Aggregations