Use of com.sequenceiq.common.model.diagnostics.AwsDiagnosticParameters in project cloudbreak by hortonworks.
The class DiagnosticCloudStorageConverter, method loggingToS3:
public AwsDiagnosticParameters loggingToS3(Logging logging, String region) {
    AwsDiagnosticParameters.AwsDiagnosticParametersBuilder awsBuilder = AwsDiagnosticParameters.builder();
    S3Config s3Config = s3ConfigGenerator.generateStorageConfig(logging.getStorageLocation());
    return awsBuilder
            .withS3Bucket(s3Config.getBucket())
            .withS3Location(Paths.get(s3Config.getFolderPrefix(), DIAGNOSTICS_SUFFIX_PATH).toString())
            .withS3Region(region)
            .build();
}
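A minimal sketch of how this converter might be invoked, assuming a Logging model with a setStorageLocation setter and an injected converter instance (the setter name, bucket, and region values are illustrative, not taken from the project):

// Sketch: exercising loggingToS3 with an assumed S3 storage location and region.
Logging logging = new Logging();
logging.setStorageLocation("s3a://my-log-bucket/cluster-logs"); // assumed setter and example location
AwsDiagnosticParameters parameters = diagnosticCloudStorageConverter.loggingToS3(logging, "eu-central-1");
// parameters.getS3Bucket()   -> "my-log-bucket"
// parameters.getS3Location() -> the parsed folder prefix joined with DIAGNOSTICS_SUFFIX_PATH
// parameters.getS3Region()   -> "eu-central-1"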
Use of com.sequenceiq.common.model.diagnostics.AwsDiagnosticParameters in project cloudbreak by hortonworks.
The class CloudStorageValidationServiceTest, method testValidationWithFailingIdbrokerAndRandomHost:
@Test
void testValidationWithFailingIdbrokerAndRandomHost() throws CloudbreakOrchestratorException {
    Stack stack = getStack();
    when(stackService.getByIdWithClusterInTransaction(STACK_ID)).thenReturn(stack);
    when(entitlementService.cloudStorageValidationOnVmEnabled(anyString())).thenReturn(true);
    DetailedEnvironmentResponse environment = getEnvironment();
    when(environmentClientService.getByCrn(ENV_CRN)).thenReturn(environment);
    List<GatewayConfig> gatewayConfigs = getGatewayConfigs();
    when(gatewayConfigService.getAllGatewayConfigs(stack)).thenReturn(gatewayConfigs);
    Set<Node> nodes = getNodes();
    when(stackUtil.collectNodes(stack)).thenReturn(nodes);
    when(diagnosticCloudStorageConverter.loggingResponseToCloudStorageDiagnosticsParameters(environment.getTelemetry().getLogging(), REGION))
            .thenReturn(new AwsDiagnosticParameters());
    doThrow(new CloudbreakOrchestratorFailedException("failed"))
            .when(telemetryOrchestrator).validateCloudStorage(anyList(), anySet(), any(), anyMap(), any());
    assertThrows(CloudbreakOrchestratorFailedException.class, () -> underTest.validateCloudStorage(STACK_ID),
            "If provisioning was done using the UI, then verify the log's instance profile and logs location base when provisioning");
    ArgumentCaptor<Set<String>> targetHostnamesCaptor = ArgumentCaptor.forClass(Set.class);
    verify(telemetryOrchestrator, times(2)).validateCloudStorage(anyList(), anySet(), targetHostnamesCaptor.capture(), anyMap(), any());
    verify(eventService, times(1)).fireCloudbreakEvent(anyLong(), anyString(), any());
    List<Set<String>> capturedTargetHostnames = targetHostnamesCaptor.getAllValues();
    List<String> idbrokerTargets = capturedTargetHostnames.stream()
            .flatMap(Collection::stream)
            .filter(s -> s.equals(IDBROKER_HOSTNAME))
            .collect(Collectors.toList());
    List<String> nonIdbrokerTargets = capturedTargetHostnames.stream()
            .flatMap(Collection::stream)
            .filter(s -> !s.equals(IDBROKER_HOSTNAME))
            .collect(Collectors.toList());
    assertEquals(1, idbrokerTargets.size());
    assertEquals(1, nonIdbrokerTargets.size());
}
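Note that the String passed to assertThrows above is the message JUnit reports if no exception is thrown at all; it is not an assertion on the exception text. If the exception message itself should be checked, the exception returned by assertThrows can be inspected; a minimal sketch, assuming the service propagates the stubbed orchestrator failure message unchanged (that propagation behavior is an assumption here):

// Sketch: assertThrows returns the thrown exception for further assertions;
// whether the message is exactly the stubbed "failed" or wrapped with extra
// guidance depends on how the service rethrows the orchestrator failure (assumption).
CloudbreakOrchestratorFailedException thrown = assertThrows(CloudbreakOrchestratorFailedException.class,
        () -> underTest.validateCloudStorage(STACK_ID));
assertTrue(thrown.getMessage().contains("failed"));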
Use of com.sequenceiq.common.model.diagnostics.AwsDiagnosticParameters in project cloudbreak by hortonworks.
The class CloudStorageValidationServiceTest, method testValidationWithIdbrokerAndRandomHost:
@Test
void testValidationWithIdbrokerAndRandomHost() throws CloudbreakOrchestratorException {
    Stack stack = getStack();
    when(stackService.getByIdWithClusterInTransaction(STACK_ID)).thenReturn(stack);
    when(entitlementService.cloudStorageValidationOnVmEnabled(anyString())).thenReturn(true);
    DetailedEnvironmentResponse environment = getEnvironment();
    when(environmentClientService.getByCrn(ENV_CRN)).thenReturn(environment);
    List<GatewayConfig> gatewayConfigs = getGatewayConfigs();
    when(gatewayConfigService.getAllGatewayConfigs(stack)).thenReturn(gatewayConfigs);
    Set<Node> nodes = getNodes();
    when(stackUtil.collectNodes(stack)).thenReturn(nodes);
    when(diagnosticCloudStorageConverter.loggingResponseToCloudStorageDiagnosticsParameters(environment.getTelemetry().getLogging(), REGION))
            .thenReturn(new AwsDiagnosticParameters());
    underTest.validateCloudStorage(STACK_ID);
    ArgumentCaptor<Set<String>> targetHostnamesCaptor = ArgumentCaptor.forClass(Set.class);
    verify(telemetryOrchestrator, times(2)).validateCloudStorage(anyList(), anySet(), targetHostnamesCaptor.capture(), anyMap(), any());
    List<Set<String>> capturedTargetHostnames = targetHostnamesCaptor.getAllValues();
    List<String> idbrokerTargets = capturedTargetHostnames.stream()
            .flatMap(Collection::stream)
            .filter(s -> s.equals(IDBROKER_HOSTNAME))
            .collect(Collectors.toList());
    List<String> nonIdbrokerTargets = capturedTargetHostnames.stream()
            .flatMap(Collection::stream)
            .filter(s -> !s.equals(IDBROKER_HOSTNAME))
            .collect(Collectors.toList());
    assertEquals(1, idbrokerTargets.size());
    assertEquals(1, nonIdbrokerTargets.size());
}
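Both tests build the captor with the raw ArgumentCaptor.forClass(Set.class), which compiles only with an unchecked-conversion warning. With the Mockito JUnit 5 extension, the same captor can be declared as a typed field instead; a minimal sketch (the field placement is illustrative):

// Sketch: a typed captor injected by MockitoExtension avoids the raw forClass(Set.class) call.
@ExtendWith(MockitoExtension.class)
class CloudStorageValidationServiceTest {

    @Captor
    private ArgumentCaptor<Set<String>> targetHostnamesCaptor;

    // ... the verify calls can then use targetHostnamesCaptor.capture() exactly as above.
}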
Use of com.sequenceiq.common.model.diagnostics.AwsDiagnosticParameters in project cloudbreak by hortonworks.
The class DiagnosticsCollectionActions, method diagnosticsUploadAction:
@Bean(name = "DIAGNOSTICS_UPLOAD_STATE")
public Action<?, ?> diagnosticsUploadAction() {
    return new AbstractDiagnosticsCollectionActions<>(DiagnosticsCollectionEvent.class) {
        @Override
        protected void doExecute(CommonContext context, DiagnosticsCollectionEvent payload, Map<Object, Object> variables) {
            Long resourceId = payload.getResourceId();
            String resourceCrn = payload.getResourceCrn();
            LOGGER.debug("Flow entered into DIAGNOSTICS_UPLOAD_STATE. resourceCrn: '{}'", resourceCrn);
            fireUploadEvent(resourceId, payload);
            DiagnosticsCollectionEvent event = DiagnosticsCollectionEvent.builder()
                    .withResourceId(resourceId)
                    .withResourceCrn(payload.getResourceCrn())
                    .withSelector(DiagnosticsCollectionHandlerSelectors.UPLOAD_DIAGNOSTICS_EVENT.selector())
                    .withParameters(payload.getParameters())
                    .withHosts(payload.getHosts())
                    .withHostGroups(payload.getHostGroups())
                    .withExcludeHosts(payload.getExcludeHosts())
                    .build();
            sendEvent(context, event);
        }

        private void fireUploadEvent(Long resourceId, DiagnosticsCollectionEvent payload) {
            DiagnosticParameters parameters = payload.getParameters();
            String message;
            switch (parameters.getDestination()) {
                case CLOUD_STORAGE:
                    String storageLocation = getStorageLocation(parameters);
                    message = "Upload location: " + storageLocation;
                    break;
                case ENG:
                    message = "Engineering will receive the logs.";
                    break;
                case SUPPORT:
                    if (StringUtils.isNotBlank(parameters.getIssue())) {
                        message = String.format("Diagnostics have been sent to support. Case number: '%s' Description: '%s'",
                                parameters.getIssue(), parameters.getDescription());
                    } else {
                        message = String.format("Diagnostics have been sent to support. A ticket will be created for the diagnostics. Description: '%s'",
                                parameters.getDescription());
                    }
                    break;
                default:
                    message = "Location for logs on each node: " + LOCAL_LOG_PATH;
                    break;
            }
            cloudbreakEventService.fireCloudbreakEvent(resourceId, UPDATE_IN_PROGRESS.name(), ResourceEvent.STACK_DIAGNOSTICS_UPLOAD_RUNNING, List.of(message));
        }

        private String getStorageLocation(DiagnosticParameters parameters) {
            String storageLocation;
            CloudStorageDiagnosticsParameters csDiagnosticsParams = parameters.getCloudStorageDiagnosticsParameters();
            if (csDiagnosticsParams instanceof AwsDiagnosticParameters) {
                AwsDiagnosticParameters awsParameters = (AwsDiagnosticParameters) csDiagnosticsParams;
                storageLocation = "s3://" + Paths.get(awsParameters.getS3Bucket(), awsParameters.getS3Location()).toString();
            } else if (csDiagnosticsParams instanceof AzureDiagnosticParameters) {
                AzureDiagnosticParameters azureParameters = (AzureDiagnosticParameters) csDiagnosticsParams;
                storageLocation = "abfs://" + Paths.get(azureParameters.getAdlsv2StorageContainer(), azureParameters.getAdlsv2StorageLocation()).toString();
            } else {
                GcsDiagnosticsParameters gcsParameters = (GcsDiagnosticsParameters) csDiagnosticsParams;
                storageLocation = "gcs://" + Paths.get(gcsParameters.getBucket(), gcsParameters.getGcsLocation()).toString();
            }
            return storageLocation;
        }
    };
}
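To illustrate getStorageLocation, the AWS branch simply joins the bucket and the S3 location with Paths.get and prefixes the s3:// scheme; a minimal sketch with assumed example values (not taken from the project):

// Sketch: the AWS branch of getStorageLocation with illustrative values.
AwsDiagnosticParameters awsParameters = AwsDiagnosticParameters.builder()
        .withS3Bucket("my-log-bucket")              // assumed example bucket
        .withS3Location("cluster-logs/diagnostics") // assumed example location
        .build();
String storageLocation = "s3://" + Paths.get(awsParameters.getS3Bucket(), awsParameters.getS3Location()).toString();
// storageLocation -> "s3://my-log-bucket/cluster-logs/diagnostics"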
Use of com.sequenceiq.common.model.diagnostics.AwsDiagnosticParameters in project cloudbreak by hortonworks.
The class DiagnosticCloudStorageConverter, method loggingResponseToS3:
public AwsDiagnosticParameters loggingResponseToS3(LoggingResponse logging, String region) {
    AwsDiagnosticParameters.AwsDiagnosticParametersBuilder awsBuilder = AwsDiagnosticParameters.builder();
    S3Config s3Config = s3ConfigGenerator.generateStorageConfig(logging.getStorageLocation());
    return awsBuilder
            .withS3Bucket(s3Config.getBucket())
            .withS3Location(Paths.get(s3Config.getFolderPrefix(), DIAGNOSTICS_SUFFIX_PATH).toString())
            .withS3Region(region)
            .build();
}
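This overload mirrors loggingToS3 but accepts the LoggingResponse API model instead of the internal Logging type; both end up joining the parsed folder prefix with the diagnostics suffix. A minimal sketch of that joining step, with an assumed suffix value since the DIAGNOSTICS_SUFFIX_PATH constant's value is not shown here:

// Sketch: how the S3 location is composed; DIAGNOSTICS_SUFFIX_PATH is assumed to be "diagnostics".
String folderPrefix = "cluster-logs";                                  // assumed example prefix from S3Config
String s3Location = Paths.get(folderPrefix, "diagnostics").toString(); // -> "cluster-logs/diagnostics"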