Use of com.sequenceiq.datalake.entity.SdxCluster in project cloudbreak by hortonworks: class DatalakeBackupActions, method datalakeBackup.
// Flow action for the initial state of the datalake database backup flow:
// records the operation/backup ids in the flow variables, notifies about the
// backup, starts it, and advances the state machine.
@Bean(name = "DATALAKE_DATABASE_BACKUP_START_STATE")
public Action<?, ?> datalakeBackup() {
return new AbstractSdxAction<>(DatalakeDatabaseBackupStartEvent.class) {
@Override
protected SdxContext createFlowContext(FlowParameters flowParameters, StateContext<FlowState, FlowEvent> stateContext, DatalakeDatabaseBackupStartEvent payload) {
// Flow context is derived entirely from the trigger payload.
return SdxContext.from(flowParameters, payload);
}
@Override
protected void prepareExecution(DatalakeDatabaseBackupStartEvent payload, Map<Object, Object> variables) {
super.prepareExecution(payload, variables);
// Store ids only when absent — NOTE(review): presumably so an already-set
// value (e.g. from a prior run of this state) is not overwritten; confirm.
if (!variables.containsKey(OPERATION_ID)) {
variables.put(OPERATION_ID, payload.getDrStatus().getOperationId());
}
if (!variables.containsKey(BACKUP_ID)) {
variables.put(BACKUP_ID, payload.getBackupRequest().getBackupId());
}
}
@Override
protected void doExecute(SdxContext context, DatalakeDatabaseBackupStartEvent payload, Map<Object, Object> variables) {
LOGGER.info("Datalake database backup has been started for {}", payload.getResourceId());
SdxCluster sdxCluster = sdxService.getById(payload.getResourceId());
// Order matters: notify first, then start the actual database backup,
// then move the flow to the in-progress state.
eventSenderService.sendEventAndNotification(sdxCluster, context.getFlowTriggerUserCrn(), ResourceEvent.DATALAKE_DATABASE_BACKUP);
sdxBackupRestoreService.databaseBackup(payload.getDrStatus(), payload.getResourceId(), payload.getBackupRequest());
sendEvent(context, DATALAKE_DATABASE_BACKUP_IN_PROGRESS_EVENT.event(), payload);
}
@Override
protected Object getFailurePayload(DatalakeDatabaseBackupStartEvent payload, Optional<SdxContext> flowContext, Exception ex) {
// Any exception in this state is translated into the "could not start" event.
return DatalakeDatabaseBackupCouldNotStartEvent.from(payload, ex);
}
};
}
Use of com.sequenceiq.datalake.entity.SdxCluster in project cloudbreak by hortonworks: class DatalakeStructuredFlowEventFactory, method createStructuredFlowEvent.
/**
 * Builds a structured flow event for the datalake identified by {@code resourceId},
 * carrying the cluster DTO, its latest persisted status/status reason, and — when
 * present — the stack trace of the supplied exception.
 */
@Override
public CDPStructuredFlowEvent<SdxClusterDto> createStructuredFlowEvent(Long resourceId, FlowDetails flowDetails, Boolean detailed, Exception exception) {
SdxCluster cluster = sdxService.getById(resourceId);
SdxClusterDto clusterDto = sdxClusterDtoConverter.sdxClusterToDto(cluster);
CDPOperationDetails opDetails = makeCdpOperationDetails(resourceId, cluster);
// NOTE(review): assumes at least one status row exists for the cluster; NPE otherwise — confirm.
SdxStatusEntity latestStatus = sdxStatusRepository.findFirstByDatalakeIsOrderByIdDesc(cluster);
CDPStructuredFlowEvent<SdxClusterDto> flowEvent =
        new CDPStructuredFlowEvent<>(opDetails, flowDetails, clusterDto, latestStatus.getStatus().name(), latestStatus.getStatusReason());
if (exception != null) {
flowEvent.setException(ExceptionUtils.getStackTrace(exception));
}
return flowEvent;
}
Use of com.sequenceiq.datalake.entity.SdxCluster in project cloudbreak by hortonworks: class SdxDetachService, method detachCluster.
/**
 * Detaches the internal SDX cluster by assigning it a new "detached" name and CRN.
 *
 * @param sdxID id of the SDX cluster to detach
 * @return the persisted cluster, now carrying the detached name/CRN and the detached flag
 */
public SdxCluster detachCluster(Long sdxID) {
LOGGER.info("Started detaching SDX cluster with ID: {}.", sdxID);
sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.STOPPED, "Datalake detach in progress.", sdxID);
SdxCluster toDetach = sdxService.getById(sdxID);
String previousName = toDetach.getClusterName();
String detachedName = sdxDetachNameGenerator.generateDetachedClusterName(previousName);
String detachedCrn = regionAwareCrnGenerator.generateCrnStringWithUuid(CrnResourceDescriptor.DATALAKE, toDetach.getAccountId());
sdxAttachDetachUtils.updateClusterNameAndCrn(toDetach, detachedName, detachedCrn);
toDetach.setDetached(true);
SdxCluster persisted = sdxService.save(toDetach);
LOGGER.info("Finished detaching SDX cluster with ID: {}. Modified name from {} to {} and crn from {} to {}.", persisted.getId(), previousName, persisted.getClusterName(), persisted.getOriginalCrn(), persisted.getCrn());
return persisted;
}
Use of com.sequenceiq.datalake.entity.SdxCluster in project cloudbreak by hortonworks: class SdxCcmUpgradeServiceTest, method getSdxCluster.
// Minimal SdxCluster fixture: only the name and the env/stack CRNs the tests read.
private SdxCluster getSdxCluster() {
SdxCluster cluster = new SdxCluster();
cluster.setClusterName(CLUSTER_NAME);
cluster.setEnvCrn(ENV_CRN);
cluster.setStackCrn(STACK_CRN);
return cluster;
}
Use of com.sequenceiq.datalake.entity.SdxCluster in project cloudbreak by hortonworks: class SdxCcmUpgradeServiceTest, method testInitAndWaitForStackUpgrade.
// Verifies that initAndWaitForStackUpgrade triggers the CCM upgrade on the stack
// endpoint and then polls until the upgrade becomes available.
@Test
void testInitAndWaitForStackUpgrade() {
// Internal-actor CRN used by the internal endpoint call.
when(regionAwareInternalCrnGeneratorFactory.iam()).thenReturn(regionAwareInternalCrnGenerator);
when(regionAwareInternalCrnGenerator.getInternalCrnForServiceAsString()).thenReturn("crn:altus:iam:us-west-1:altus:user:__internal__actor__");
StackCcmUpgradeV4Response ccmUpgradeResponse = new StackCcmUpgradeV4Response(new FlowIdentifier(FlowType.FLOW, "pollableId"));
when(stackV4Endpoint.upgradeCcmByCrnInternal(eq(0L), eq(STACK_CRN), any())).thenReturn(ccmUpgradeResponse);
SdxCluster cluster = getSdxCluster();
PollingConfig pollingConfig = new PollingConfig(1L, TimeUnit.HOURS, 1L, TimeUnit.HOURS);

underTest.initAndWaitForStackUpgrade(cluster, pollingConfig);

verify(stackV4Endpoint).upgradeCcmByCrnInternal(any(), eq(STACK_CRN), any());
verify(cloudbreakPoller).pollCcmUpgradeUntilAvailable(cluster, pollingConfig);
}
Aggregations