Use of org.apache.commons.lang3.exception.ExceptionUtils.getRootCauseMessage in project backend by CatalogueOfLife.
The class SectorRunnable, method run.
@Override
public void run() {
    LoggingUtils.setSectorMDC(sectorKey, state.getAttempt(), getClass());
    boolean failed = true;
    try {
        state.setStarted(LocalDateTime.now());
        state.setState(ImportState.PREPARING);
        LOG.info("Start {} for sector {}", this.getClass().getSimpleName(), sectorKey);
        init();
        doWork();
        state.setState(ImportState.ANALYZING);
        LOG.info("Build metrics for sector {}", sectorKey);
        doMetrics();
        state.setState(ImportState.INDEXING);
        LOG.info("Update search index for sector {}", sectorKey);
        updateSearchIndex();
        state.setState(ImportState.FINISHED);
        LOG.info("Completed {} for sector {}", this.getClass().getSimpleName(), sectorKey);
        failed = false;
        successCallback.accept(this);
    } catch (InterruptedException e) {
        LOG.warn("Interrupted {}", this, e);
        state.setState(ImportState.CANCELED);
        errorCallback.accept(this, e);
    } catch (Exception e) {
        LOG.error("Failed {}", this, e);
        state.setError(ExceptionUtils.getRootCauseMessage(e));
        state.setState(ImportState.FAILED);
        errorCallback.accept(this, e);
    } finally {
        state.setFinished(LocalDateTime.now());
        // persist sector import
        try (SqlSession session = factory.openSession(true)) {
            session.getMapper(SectorImportMapper.class).update(state);
            // update sector with latest attempt on success only for true syncs
            if (!failed && this instanceof SectorSync) {
                session.getMapper(SectorMapper.class).updateLastSync(sectorKey, state.getAttempt());
            }
        }
        LoggingUtils.removeSectorMDC();
    }
}
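For reference, a minimal standalone sketch (not taken from the project; the exception types and messages are made up) of what ExceptionUtils.getRootCauseMessage contributes to the failure branch above: it walks the cause chain to the deepest throwable and formats it as "ShortClassName: message", so state.setError records the underlying failure rather than the outermost wrapper.

import org.apache.commons.lang3.exception.ExceptionUtils;

public class RootCauseMessageDemo {
    public static void main(String[] args) {
        // A failure wrapped in several layers, as doWork() might surface it.
        Exception root = new IllegalArgumentException("sector dataset is missing");
        Exception wrapped = new RuntimeException("sync failed", new RuntimeException("stage error", root));
        // Prints "IllegalArgumentException: sector dataset is missing" --
        // the short class name and message of the deepest cause.
        System.out.println(ExceptionUtils.getRootCauseMessage(wrapped));
        // getMessage, by contrast, formats only the outermost throwable:
        // "RuntimeException: sync failed".
        System.out.println(ExceptionUtils.getMessage(wrapped));
    }
}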
Use of org.apache.commons.lang3.exception.ExceptionUtils.getRootCauseMessage in project spring-cloud-dataflow by spring-cloud.
The class MetricsReplicationEnvironmentPostProcessor, method postProcessEnvironment.
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
    try {
        // Disable (only for this processor) the failures on unresolved placeholders.
        environment.setIgnoreUnresolvableNestedPlaceholders(true);
        Properties additionalProperties = new Properties();
        // 1. Infer the Monitoring Dashboard properties from the server's metrics configuration properties.
        this.inferMonitoringDashboardProperties(environment, additionalProperties);
        // 2. Replicate the server's metrics properties to the applicationProperties.stream
        // and applicationProperties.task.
        if (environment.getProperty(MONITORING_PREFIX + ".property-replication", Boolean.class, true)) {
            // Callback function that checks if the input property is set in the server's configuration.
            // If it is, the property is replicated as a common Stream and Task property.
            Consumer<String> propertyReplicator = metricsPropertyName -> {
                if (environment.containsProperty(metricsPropertyName)) {
                    try {
                        String serverPropertyValue = environment.getProperty(metricsPropertyName);
                        // Override only the Stream applicationProperties that have not been set explicitly.
                        String commonStreamPropertyName = COMMON_APPLICATION_PREFIX + ".stream." + metricsPropertyName;
                        if (!environment.containsProperty(commonStreamPropertyName)) {
                            logger.info("Replicate metrics property: " + commonStreamPropertyName + "=" + serverPropertyValue);
                            // If a property with the same key occurs multiple times, only the first is set.
                            additionalProperties.putIfAbsent(commonStreamPropertyName, serverPropertyValue);
                        }
                        // Override only the Task applicationProperties that have not been set explicitly.
                        String commonTaskPropertyName = COMMON_APPLICATION_PREFIX + ".task." + metricsPropertyName;
                        if (!environment.containsProperty(commonTaskPropertyName)) {
                            logger.info("Replicate metrics property: " + commonTaskPropertyName + "=" + serverPropertyValue);
                            // If a property with the same key occurs multiple times, only the first is set.
                            additionalProperties.putIfAbsent(commonTaskPropertyName, serverPropertyValue);
                        }
                    } catch (Throwable throwable) {
                        logger.error("Failed with replicating {}, because of {}", metricsPropertyName, ExceptionUtils.getRootCauseMessage(throwable));
                    }
                }
            };
            this.replicateServerMetricsPropertiesToStreamAndTask(environment, WavefrontProperties.class, propertyReplicator);
            this.replicateServerMetricsPropertiesToStreamAndTask(environment, InfluxProperties.class, propertyReplicator);
            this.replicateServerMetricsPropertiesToStreamAndTask(environment, PrometheusProperties.class, propertyReplicator);
            this.replicateServerMetricsPropertiesToStreamAndTask(environment, PrometheusRSocketClientProperties.class, propertyReplicator);
        }
        // This post-processor is called multiple times but sets the properties only once.
        if (!additionalProperties.isEmpty()) {
            PropertiesPropertySource propertiesPropertySource = new PropertiesPropertySource(PROPERTY_SOURCE_KEY_NAME, additionalProperties);
            environment.getPropertySources().addLast(propertiesPropertySource);
        }
    } finally {
        environment.setIgnoreUnresolvableNestedPlaceholders(false);
    }
}
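A minimal sketch (plain Spring Core types; the property key is hypothetical) of why the replicated properties are registered with addLast: a property source appended at the end of the list has the lowest precedence, so values the user set explicitly always win over the replicated defaults, and putIfAbsent additionally keeps only the first replicated value per key.

import java.util.Properties;
import org.springframework.core.env.PropertiesPropertySource;
import org.springframework.core.env.StandardEnvironment;

public class PropertyPrecedenceDemo {
    public static void main(String[] args) {
        StandardEnvironment environment = new StandardEnvironment();
        // Pretend the user set this property explicitly (hypothetical key).
        Properties userSet = new Properties();
        userSet.setProperty("demo.metrics.uri", "http://user-configured:9090");
        environment.getPropertySources().addFirst(new PropertiesPropertySource("userConfig", userSet));
        // Replicated defaults are appended last, mirroring the post-processor above.
        Properties replicated = new Properties();
        replicated.putIfAbsent("demo.metrics.uri", "http://replicated:9090");
        environment.getPropertySources().addLast(new PropertiesPropertySource("replicatedConfig", replicated));
        // The explicit value wins: prints "http://user-configured:9090".
        System.out.println(environment.getProperty("demo.metrics.uri"));
    }
}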
Use of org.apache.commons.lang3.exception.ExceptionUtils.getRootCauseMessage in project build-info by jfrog.
The class GoDependencyTree, method createDependencyTree.
/**
 * Create the Go dependency tree of actually used dependencies.
 *
 * @param goDriver - Go driver
 * @param logger   - the logger
 * @param verbose  - verbose logging
 * @return Go dependency tree
 * @throws IOException in case of any I/O error.
 */
public static DependencyTree createDependencyTree(GoDriver goDriver, Log logger, boolean verbose) throws IOException {
    // Run "go mod graph".
    CommandResults goGraphResult = goDriver.modGraph(verbose);
    String[] dependenciesGraph = goGraphResult.getRes().split("\\r?\\n");
    // Run go list -f "{{with .Module}}{{.Path}} {{.Version}}{{end}}" all
    CommandResults usedModulesResults;
    try {
        usedModulesResults = goDriver.getUsedModules(false, false);
    } catch (IOException e) {
        // Errors occurred while running "go list". Run it again, this time ignoring errors.
        usedModulesResults = goDriver.getUsedModules(false, true);
        logger.warn("Errors occurred during building the Go dependency tree. The dependency tree may be incomplete:" + System.lineSeparator() + ExceptionUtils.getRootCauseMessage(e));
    }
    Set<String> usedDependencies = Arrays.stream(usedModulesResults.getRes().split("\\r?\\n"))
            .map(String::trim)
            .map(usedModule -> usedModule.replace(" ", "@"))
            .collect(Collectors.toSet());
    // Create the root node.
    String rootPackageName = goDriver.getModuleName();
    DependencyTree rootNode = new DependencyTree(rootPackageName);
    rootNode.setMetadata(true);
    // Build the dependency tree.
    Map<String, List<String>> dependenciesMap = new HashMap<>();
    populateDependenciesMap(dependenciesGraph, usedDependencies, dependenciesMap);
    populateDependencyTree(rootNode, rootPackageName, dependenciesMap, logger);
    return rootNode;
}
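populateDependenciesMap is not shown on this page. As a rough sketch of the kind of parsing it presumably performs: each go mod graph line is a "parent child" edge with modules in path@version form, and only edges leading to actually used modules need to be kept. The method below is an illustration under those assumptions, not the project's implementation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class GoGraphParseDemo {
    // Hypothetical stand-in for populateDependenciesMap: keep only edges
    // whose child appears in the set of actually used modules.
    static Map<String, List<String>> parse(String[] graphLines, Set<String> usedDependencies) {
        Map<String, List<String>> dependenciesMap = new HashMap<>();
        for (String line : graphLines) {
            String[] edge = line.trim().split("\\s+");
            if (edge.length != 2 || !usedDependencies.contains(edge[1])) {
                continue;
            }
            dependenciesMap.computeIfAbsent(edge[0], k -> new ArrayList<>()).add(edge[1]);
        }
        return dependenciesMap;
    }

    public static void main(String[] args) {
        String[] graph = {
            "github.com/example/app github.com/pkg/errors@v0.9.1",
            "github.com/example/app golang.org/x/text@v0.3.7"
        };
        Set<String> used = new HashSet<>(Arrays.asList("github.com/pkg/errors@v0.9.1"));
        // Prints {github.com/example/app=[github.com/pkg/errors@v0.9.1]}
        System.out.println(parse(graph, used));
    }
}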
Use of org.apache.commons.lang3.exception.ExceptionUtils.getRootCauseMessage in project pulsar by yahoo.
The class ManagedLedgerTest, method testConcurrentOpenCursorShouldNotHaveConcurrentAccessOfUninitializedCursors.
@Test
public void testConcurrentOpenCursorShouldNotHaveConcurrentAccessOfUninitializedCursors() throws Exception {
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("ConcurrentAccessOfUninitializedCursors");
    final CompletableFuture<ManagedCursor> cursorFuture = new CompletableFuture<>();
    final CompletableFuture<Void> removingFuture = new CompletableFuture<>();
    final CompletableFuture<Void> concurrentAccessFuture = new CompletableFuture<>();
    final Throwable concurrentAccessTimeout = new TimeoutException();
    cachedExecutor.execute(() -> {
        removingFuture.join();
        CompletableFuture<Void> lockingFuture = new CompletableFuture<>();
        cachedExecutor.execute(() -> {
            try {
                lockingFuture.join();
                // Give `synchronized (ledger)` a chance to complete, in case it acquired the lock immediately.
                Thread.sleep(2);
                // Normally, the following code runs after the contention on `synchronized (ledger)`
                // has succeeded or failed. In theory it could complete before the `synchronized (ledger)`
                // block even contends, but that is rare in practice and harmless, since it can only
                // produce false positives.
                concurrentAccessFuture.completeExceptionally(concurrentAccessTimeout);
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
        });
        lockingFuture.complete(null);
        synchronized (ledger) {
            concurrentAccessFuture.complete(null);
        }
    });
    Map<String, CompletableFuture<ManagedCursor>> uninitializedCursors = ledger.uninitializedCursors;
    Map<String, CompletableFuture<ManagedCursor>> spyUninitializedCursors = spy(uninitializedCursors);
    doAnswer(mock -> {
        removingFuture.complete(null);
        try {
            // Access to uninitializedCursors should be guarded by synchronized (ledger),
            // so there must be no concurrent access within this scope. If this future
            // completes successfully, there was a concurrent access.
            concurrentAccessFuture.get();
            Throwable throwable = new IllegalStateException("Detecting concurrent access of uninitializedCursors");
            cursorFuture.completeExceptionally(throwable);
        } catch (Exception ex) {
            assertSame(ExceptionUtils.getRootCause(ex), concurrentAccessTimeout);
        }
        return mock.callRealMethod();
    }).when(spyUninitializedCursors).remove(anyString());
    setFieldValue(ManagedLedgerImpl.class, ledger, "uninitializedCursors", spyUninitializedCursors);
    cachedExecutor.execute(() -> {
        try {
            ledger.asyncOpenCursor("c1", new OpenCursorCallback() {

                @Override
                public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
                    cursorFuture.completeExceptionally(exception);
                }

                @Override
                public void openCursorComplete(ManagedCursor cursor, Object ctx) {
                    cursorFuture.complete(cursor);
                }
            }, null);
        } catch (Exception e) {
            cursorFuture.completeExceptionally(e);
        }
    });
    try {
        ManagedCursor cursor = cursorFuture.get();
        assertNotNull(cursor);
    } catch (Exception ex) {
        fail(ExceptionUtils.getRootCauseMessage(ex));
    } finally {
        ledger.close();
    }
}
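The assertSame call above leans on a property of ExceptionUtils.getRootCause that is worth spelling out: it returns the original Throwable instance at the bottom of the cause chain, not a copy or a formatted message, so reference equality survives any number of wrappers. A standalone sketch:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.exception.ExceptionUtils;

public class RootCauseIdentityDemo {
    public static void main(String[] args) {
        // The same instance the test holds as `concurrentAccessTimeout`.
        Throwable timeout = new TimeoutException();
        // CompletableFuture.get() wraps a stored failure in ExecutionException,
        // which is the wrapping the test's catch block sees.
        Throwable wrapped = new ExecutionException(timeout);
        // getRootCause walks the cause chain and returns the same object,
        // which is exactly what assertSame(..., concurrentAccessTimeout) relies on.
        System.out.println(ExceptionUtils.getRootCause(wrapped) == timeout); // true
    }
}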
Use of org.apache.commons.lang3.exception.ExceptionUtils.getRootCauseMessage in project azure-tools-for-java by Microsoft.
The class SparkBatchJobDebuggerRunner, method execute.
/**
 * Execute the Spark remote debugging action. Refer to the {@link GenericDebuggerRunner#execute(ExecutionEnvironment)}
 * implementation; some internal APIs are leveraged.
 *
 * @param environment the execution environment
 * @throws ExecutionException the exception in execution
 */
@Override
public void execute(final ExecutionEnvironment environment) throws ExecutionException {
    final RunProfileState state = environment.getState();
    if (state == null) {
        return;
    }
    final Operation operation = environment.getUserData(TelemetryKeys.OPERATION);
    final AsyncPromise<ExecutionEnvironment> jobDriverEnvReady = new AsyncPromise<>();
    final SparkBatchRemoteDebugState submissionState = (SparkBatchRemoteDebugState) state;
    final SparkSubmitModel submitModel = submissionState.getSubmitModel();
    // Create the SSH debug session first.
    final SparkBatchDebugSession session;
    try {
        session = SparkBatchDebugSession.factoryByAuth(getSparkJobUrl(submitModel), submitModel.getAdvancedConfigModel())
                .open()
                .verifyCertificate();
    } catch (final Exception e) {
        final ExecutionException exp = new ExecutionException(
                "Failed to create SSH session for debugging. " + ExceptionUtils.getRootCauseMessage(e));
        EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, exp, null, null);
        throw exp;
    }
    final Project project = submitModel.getProject();
    final ExecutionManager executionManager = ExecutionManager.getInstance(project);
    final IdeaSchedulers schedulers = new IdeaSchedulers(project);
    final PublishSubject<SparkBatchJobSubmissionEvent> debugEventSubject = PublishSubject.create();
    final ISparkBatchDebugJob sparkDebugBatch = (ISparkBatchDebugJob) submissionState.getSparkBatch().clone();
    final PublishSubject<SparkLogLine> ctrlSubject = (PublishSubject<SparkLogLine>) sparkDebugBatch.getCtrlSubject();
    final SparkBatchJobRemoteDebugProcess driverDebugProcess = new SparkBatchJobRemoteDebugProcess(
            schedulers,
            session,
            sparkDebugBatch,
            submitModel.getArtifactPath().orElseThrow(() -> new ExecutionException("No artifact selected")),
            submitModel.getSubmissionParameter().getMainClassName(),
            submitModel.getAdvancedConfigModel(),
            ctrlSubject);
    final SparkBatchJobDebugProcessHandler driverDebugHandler =
            new SparkBatchJobDebugProcessHandler(project, driverDebugProcess, debugEventSubject);
    // Prepare an independent submission console.
    final ConsoleViewImpl submissionConsole = new ConsoleViewImpl(project, true);
    final RunContentDescriptor submissionDesc = new RunContentDescriptor(
            submissionConsole,
            driverDebugHandler,
            submissionConsole.getComponent(),
            String.format("Submit %s to cluster %s",
                    submitModel.getSubmissionParameter().getMainClassName(),
                    submitModel.getSubmissionParameter().getClusterName()));
    // Show the submission console view.
    ExecutionManager.getInstance(project).getContentManager().showRunContent(environment.getExecutor(), submissionDesc);
    // Use the submission console to display the deployment ctrl messages.
    final Subscription jobSubscription = ctrlSubject.subscribe(typedMessage -> {
        final String line = typedMessage.getRawLog() + "\n";
        switch (typedMessage.getMessageInfoType()) {
            case Error:
                submissionConsole.print(line, ConsoleViewContentType.ERROR_OUTPUT);
                break;
            case Info:
                submissionConsole.print(line, ConsoleViewContentType.NORMAL_OUTPUT);
                break;
            case Log:
                submissionConsole.print(line, ConsoleViewContentType.SYSTEM_OUTPUT);
                break;
            case Warning:
                submissionConsole.print(line, ConsoleViewContentType.LOG_WARNING_OUTPUT);
                break;
        }
    }, err -> {
        submissionConsole.print(ExceptionUtils.getRootCauseMessage(err), ConsoleViewContentType.ERROR_OUTPUT);
        final String errMsg = "The Spark job remote debug is cancelled due to " + ExceptionUtils.getRootCauseMessage(err);
        jobDriverEnvReady.setError(errMsg);
        EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, new UncheckedExecutionException(errMsg, err), null, null);
    }, () -> {
        if (Optional.ofNullable(driverDebugHandler.getUserData(ProcessHandler.TERMINATION_REQUESTED)).orElse(false)) {
            final String errMsg = "The Spark job remote debug is cancelled by user.";
            jobDriverEnvReady.setError(errMsg);
            final Map<String, String> props = ImmutableMap.of("isDebugCancelled", "true");
            EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.userError, new ExecutionException(errMsg), props, null);
        }
    });
    // Called after completion or error.
    debugEventSubject.subscribeOn(Schedulers.io()).doAfterTerminate(session::close).subscribe(debugEvent -> {
        try {
            if (debugEvent instanceof SparkBatchRemoteDebugHandlerReadyEvent) {
                final SparkBatchRemoteDebugHandlerReadyEvent handlerReadyEvent = (SparkBatchRemoteDebugHandlerReadyEvent) debugEvent;
                final SparkBatchDebugJobJdbPortForwardedEvent jdbReadyEvent = handlerReadyEvent.getJdbPortForwardedEvent();
                if (!jdbReadyEvent.getLocalJdbForwardedPort().isPresent()) {
                    return;
                }
                final int localPort = jdbReadyEvent.getLocalJdbForwardedPort().get();
                final ExecutionEnvironment forkEnv = forkEnvironment(
                        environment,
                        jdbReadyEvent.getRemoteHost().orElse("unknown"),
                        jdbReadyEvent.isDriver());
                final RunProfile runProfile = forkEnv.getRunProfile();
                if (!(runProfile instanceof LivySparkBatchJobRunConfiguration)) {
                    ctrlSubject.onError(new UnsupportedOperationException(
                            "Only supports LivySparkBatchJobRunConfiguration type, but got type " + runProfile.getClass().getCanonicalName()));
                    return;
                }
                // Reuse the driver's Spark batch job.
                ((LivySparkBatchJobRunConfiguration) runProfile).setSparkRemoteBatch(sparkDebugBatch);
                final SparkBatchRemoteDebugState forkState = jdbReadyEvent.isDriver()
                        ? submissionState
                        : (SparkBatchRemoteDebugState) forkEnv.getState();
                if (forkState == null) {
                    return;
                }
                // Set the debug connection to localhost and the locally forwarded port on the state.
                forkState.setRemoteConnection(new RemoteConnection(true, "localhost", Integer.toString(localPort), false));
                // Prepare the debug tab console view UI.
                SparkJobLogConsoleView jobOutputView = new SparkJobLogConsoleView(project);
                // Get the YARN container log URL port.
                int containerLogUrlPort = ((SparkBatchRemoteDebugJob) driverDebugProcess.getSparkJob())
                        .getYarnContainerLogUrlPort()
                        .toBlocking()
                        .single();
                // Parse the container ID and host URL from the driver console view.
                jobOutputView.getSecondaryConsoleView().addMessageFilter((line, entireLength) -> {
                    Matcher matcher = Pattern.compile("Launching container (\\w+).* on host ([a-zA-Z_0-9-.]+)", Pattern.CASE_INSENSITIVE).matcher(line);
                    while (matcher.find()) {
                        String containerId = matcher.group(1);
                        // TODO: get the port from somewhere else rather than hard-coding it here.
                        URI hostUri = URI.create(String.format("http://%s:%d", matcher.group(2), containerLogUrlPort));
                        debugEventSubject.onNext(new SparkBatchJobExecutorCreatedEvent(hostUri, containerId));
                    }
                    return null;
                });
                jobOutputView.attachToProcess(handlerReadyEvent.getDebugProcessHandler());
                ExecutionResult result = new DefaultExecutionResult(jobOutputView, handlerReadyEvent.getDebugProcessHandler());
                forkState.setExecutionResult(result);
                forkState.setConsoleView(jobOutputView.getSecondaryConsoleView());
                forkState.setRemoteProcessCtrlLogHandler(handlerReadyEvent.getDebugProcessHandler());
                if (jdbReadyEvent.isDriver()) {
                    // Let the debug console view handle the control log.
                    jobSubscription.unsubscribe();
                    // Resolve the job driver promise; handle the driver VM attaching separately.
                    jobDriverEnvReady.setResult(forkEnv);
                } else {
                    // Start Executor debugging.
                    executionManager.startRunProfile(forkEnv, () -> toIdeaPromise(attachAndDebug(forkEnv, forkState)));
                }
            } else if (debugEvent instanceof SparkBatchJobExecutorCreatedEvent) {
                SparkBatchJobExecutorCreatedEvent executorCreatedEvent = (SparkBatchJobExecutorCreatedEvent) debugEvent;
                final String containerId = executorCreatedEvent.getContainerId();
                final SparkBatchRemoteDebugJob debugJob = (SparkBatchRemoteDebugJob) driverDebugProcess.getSparkJob();
                URI internalHostUri = executorCreatedEvent.getHostUri();
                URI executorLogUrl = debugJob.convertToPublicLogUri(internalHostUri)
                        .map(uri -> uri.resolve(String.format("node/containerlogs/%s/livy", containerId)))
                        .toBlocking()
                        .singleOrDefault(internalHostUri);
                // Create an Executor debug process.
                SparkBatchJobRemoteDebugExecutorProcess executorDebugProcess = new SparkBatchJobRemoteDebugExecutorProcess(
                        schedulers,
                        debugJob,
                        internalHostUri.getHost(),
                        driverDebugProcess.getDebugSession(),
                        executorLogUrl.toString());
                SparkBatchJobDebugProcessHandler executorDebugHandler =
                        new SparkBatchJobDebugProcessHandler(project, executorDebugProcess, debugEventSubject);
                executorDebugHandler.getRemoteDebugProcess().start();
            }
        } catch (final ExecutionException e) {
            EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, new UncheckedExecutionException(e), null, null);
            throw new UncheckedExecutionException(e);
        }
    });
    driverDebugHandler.getRemoteDebugProcess().start();
    // Driver-side execution: leverage IntelliJ's AsyncPromise to wait for the Spark app to be deployed.
    executionManager.startRunProfile(environment, () -> jobDriverEnvReady.thenAsync(driverEnv -> toIdeaPromise(attachAndDebug(driverEnv, state))));
}
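As a small aside on the error path wired into ctrlSubject above: the onError handler surfaces only the root-cause message to the console, which keeps deeply wrapped SSH or submission failures readable. A minimal sketch, assuming RxJava 1.x (the PublishSubject and Subscription types used above) and a made-up failure:

import org.apache.commons.lang3.exception.ExceptionUtils;
import rx.subjects.PublishSubject;

public class CtrlSubjectErrorDemo {
    public static void main(String[] args) {
        PublishSubject<String> ctrlSubject = PublishSubject.create();
        // Mirror the wiring above: print normal lines, and on error print
        // only the root cause, as the submission console subscription does.
        ctrlSubject.subscribe(
                line -> System.out.println(line),
                err -> System.err.println(ExceptionUtils.getRootCauseMessage(err)));
        ctrlSubject.onNext("Deploying artifact ...");
        // Prints "IllegalStateException: SSH session closed" rather than the outer RuntimeException.
        ctrlSubject.onError(new RuntimeException("job failed", new IllegalStateException("SSH session closed")));
    }
}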