Usage of com.sequenceiq.datalake.entity.SdxStatusEntity in the project cloudbreak by Hortonworks:
the createStructuredFlowEvent method of the DatalakeStructuredFlowEventFactory class.
@Override
public CDPStructuredFlowEvent<SdxClusterDto> createStructuredFlowEvent(Long resourceId, FlowDetails flowDetails, Boolean detailed, Exception exception) {
    // Assemble a structured flow event for the datalake identified by resourceId,
    // combining operation details, the cluster DTO and the latest persisted status.
    SdxCluster cluster = sdxService.getById(resourceId);
    CDPOperationDetails operationDetails = makeCdpOperationDetails(resourceId, cluster);
    SdxClusterDto clusterDto = sdxClusterDtoConverter.sdxClusterToDto(cluster);
    // NOTE(review): assumes a status entity always exists for this cluster — if the
    // repository returns null, the getStatus() call below throws an NPE; verify upstream.
    SdxStatusEntity latestStatus = sdxStatusRepository.findFirstByDatalakeIsOrderByIdDesc(cluster);
    CDPStructuredFlowEvent<SdxClusterDto> structuredEvent = new CDPStructuredFlowEvent<>(
            operationDetails, flowDetails, clusterDto, latestStatus.getStatus().name(), latestStatus.getStatusReason());
    // Attach the full stack trace only when the flow actually failed with an exception.
    if (exception != null) {
        structuredEvent.setException(ExceptionUtils.getStackTrace(exception));
    }
    return structuredEvent;
}
Usage of com.sequenceiq.datalake.entity.SdxStatusEntity in the project cloudbreak by Hortonworks:
the setStatusForDatalakeAndNotify method of the SdxStatusServiceTest class.
@Test
void setStatusForDatalakeAndNotify() throws TransactionService.TransactionExecutionException {
    // Make the transaction wrapper transparent: just run the supplied Runnable inline.
    doAnswer(invocation -> {
        invocation.getArgument(0, Runnable.class).run();
        return null;
    }).when(transactionService).required(any(Runnable.class));
    // Fixed clock so the deletion timestamp written to the cluster is deterministic.
    long deletionTime = 100L;
    when(clock.getCurrentTimeMillis()).thenReturn(deletionTime);

    // Given: a cluster whose last recorded status is STACK_DELETED.
    SdxCluster cluster = new SdxCluster();
    cluster.setRuntime("7.0.2");
    cluster.setClusterName("datalake-cluster");
    cluster.setId(2L);
    SdxStatusEntity previousStatus = new SdxStatusEntity();
    previousStatus.setStatus(DatalakeStatusEnum.STACK_DELETED);
    previousStatus.setCreated(1L);
    previousStatus.setStatusReason("stack deleted");
    previousStatus.setId(1L);
    previousStatus.setDatalake(cluster);
    when(sdxStatusRepository.findFirstByDatalakeIsOrderByIdDesc(any(SdxCluster.class))).thenReturn(previousStatus);
    when(sdxClusterRepository.findById(eq(2L))).thenReturn(Optional.of(cluster));

    // Capture what gets persisted so the transition can be asserted afterwards.
    ArgumentCaptor<SdxStatusEntity> savedStatusCaptor = ArgumentCaptor.forClass(SdxStatusEntity.class);
    when(sdxStatusRepository.save(savedStatusCaptor.capture())).thenReturn(null);
    ArgumentCaptor<SdxCluster> savedClusterCaptor = ArgumentCaptor.forClass(SdxCluster.class);
    when(sdxClusterRepository.save(savedClusterCaptor.capture())).thenReturn(cluster);

    // When: the status is moved to DELETED.
    sdxStatusService.setStatusForDatalakeAndNotify(DatalakeStatusEnum.DELETED, ResourceEvent.SDX_RDS_DELETION_FINISHED, "deleted", cluster);

    // Then: one status row saved with the new status, the cluster's deleted timestamp
    // set from the clock, and everything executed inside a single transaction.
    verify(sdxStatusRepository, times(1)).save(any(SdxStatusEntity.class));
    assertEquals(DatalakeStatusEnum.DELETED, savedStatusCaptor.getValue().getStatus());
    assertEquals(Long.valueOf(deletionTime), savedClusterCaptor.getValue().getDeleted());
    verify(transactionService, times(1)).required(any(Runnable.class));
}
Usage of com.sequenceiq.datalake.entity.SdxStatusEntity in the project cloudbreak by Hortonworks:
the setUp method of the SdxClusterStatusCheckerJobTest class.
@BeforeEach
void setUp() {
// Common fixture: wires the job under test to mocked repositories and endpoints
// so each test starts from a cluster with a status and an empty job data map.
underTest.setLocalId(SDX_ID.toString());
underTest.setRemoteResourceCrn(STACK_ID.toString());
// Cluster returned whenever the job looks up its local id.
sdxCluster = new SdxCluster();
sdxCluster.setClusterName("data-lake-cluster");
when(sdxClusterRepository.findById(SDX_ID)).thenReturn(Optional.of(sdxCluster));
// Stack status response reachable through the internal-CRN autoscale endpoint chain.
stack = new StackStatusV4Response();
when(cloudbreakInternalCrnClient.withInternalCrn()).thenReturn(cloudbreakServiceCrnEndpoints);
when(cloudbreakServiceCrnEndpoints.autoscaleEndpoint()).thenReturn(autoscaleV4Endpoint);
when(autoscaleV4Endpoint.getStatusByCrn(STACK_ID.toString())).thenReturn(stack);
// Blank status entity attached to the cluster; individual tests set the status they need.
status = new SdxStatusEntity();
status.setDatalake(sdxCluster);
when(sdxStatusService.getActualStatusForSdx(sdxCluster)).thenReturn(status);
jobDataMap = new JobDataMap();
when(jobExecutionContext.getMergedJobDataMap()).thenReturn(jobDataMap);
// No competing flow is running by default, so the job proceeds with its check.
when(flowLogService.isOtherFlowRunning(any())).thenReturn(false);
}
Usage of com.sequenceiq.datalake.entity.SdxStatusEntity in the project cloudbreak by Hortonworks:
the getTest method of the SdxControllerTest class.
@Test
void getTest() {
    // Given: a valid cluster resolvable by name, with a REQUESTED status attached.
    SdxCluster cluster = getValidSdxCluster();
    when(sdxService.getByNameInAccount(anyString(), anyString())).thenReturn(cluster);

    SdxStatusEntity statusEntity = new SdxStatusEntity();
    statusEntity.setStatus(DatalakeStatusEnum.REQUESTED);
    statusEntity.setStatusReason("statusreason");
    statusEntity.setCreated(1L);
    when(sdxStatusService.getActualStatusForSdx(cluster)).thenReturn(statusEntity);
    // Inject the mocked status service into the converter, which is not managed by Mockito here.
    ReflectionTestUtils.setField(sdxClusterConverter, "sdxStatusService", sdxStatusService);

    // When: the controller is queried under the test user's CRN.
    SdxClusterResponse response = ThreadBasedUserCrnProvider.doAs(USER_CRN, () -> sdxController.get(SDX_CLUSTER_NAME));

    // Then: the response mirrors the cluster's identity and its current status.
    assertEquals(SDX_CLUSTER_NAME, response.getName());
    assertEquals("test-env", response.getEnvironmentName());
    assertEquals("crn:sdxcluster", response.getCrn());
    assertEquals(SdxClusterStatusResponse.REQUESTED, response.getStatus());
    assertEquals("statusreason", response.getStatusReason());
}
Usage of com.sequenceiq.datalake.entity.SdxStatusEntity in the project cloudbreak by Hortonworks:
the syncComponentVersionsFromCm method of the SdxService class.
public FlowIdentifier syncComponentVersionsFromCm(String userCrn, NameOrCrn clusterNameOrCrn) {
    // Trigger the flow that reads CM and parcel versions from Cloudera Manager,
    // rejecting the request while the datalake is in a stopped state.
    SdxCluster datalake = getByNameOrCrn(userCrn, clusterNameOrCrn);
    MDCBuilder.buildMdcContext(datalake);
    SdxStatusEntity actualStatus = sdxStatusService.getActualStatusForSdx(datalake);
    // Guard clause: a stopped datalake has no reachable CM to read versions from.
    if (actualStatus.getStatus().isStopState()) {
        String message = String.format("Reading CM and parcel versions from CM cannot be initiated as the datalake is in %s state", actualStatus.getStatus());
        LOGGER.info(message);
        throw new BadRequestException(message);
    }
    LOGGER.info("Syncing CM and parcel versions from CM initiated");
    return sdxReactorFlowManager.triggerDatalakeSyncComponentVersionsFromCmFlow(datalake);
}
Aggregations