Use of java.nio.channels.ClosedByInterruptException in project buck by facebook.
The class Build, method executeBuild.
/**
* If {@code isKeepGoing} is false, the build succeeds only if all of {@code rulesToBuild}
* build successfully. Otherwise, the build always produces a result, even if individual
* rules fail to build; a failed build rule is indicated by a {@code null} value in the
* corresponding position in the iteration order of {@code rulesToBuild}.
* @param targetish The targets to build. All targets in this iterable must be unique.
*/
@SuppressWarnings("PMD.EmptyCatchBlock")
public BuildExecutionResult executeBuild(
    Iterable<? extends BuildTarget> targetish,
    boolean isKeepGoing) throws IOException, ExecutionException, InterruptedException {
BuildId buildId = executionContext.getBuildId();
BuildEngineBuildContext buildContext = BuildEngineBuildContext.builder()
    .setBuildContext(BuildContext.builder()
        .setActionGraph(actionGraph)
        .setSourcePathResolver(new SourcePathResolver(new SourcePathRuleFinder(ruleResolver)))
        .setJavaPackageFinder(javaPackageFinder)
        .setEventBus(executionContext.getBuckEventBus())
        .setAndroidPlatformTargetSupplier(executionContext.getAndroidPlatformTargetSupplier())
        .build())
    .setClock(clock)
    .setArtifactCache(artifactCache)
    .setBuildId(buildId)
    .setObjectMapper(objectMapper)
    .putAllEnvironment(executionContext.getEnvironment())
    .setKeepGoing(isKeepGoing)
    .build();
// It is important to use this logic to determine the set of rules to build rather than
// build.getActionGraph().getNodesWithNoIncomingEdges() because, due to graph enhancement,
// there could be disconnected subgraphs in the DependencyGraph that we do not want to build.
ImmutableSet<BuildTarget> targetsToBuild = StreamSupport.stream(targetish.spliterator(), false).collect(MoreCollectors.toImmutableSet());
ImmutableList<BuildRule> rulesToBuild = ImmutableList.copyOf(targetsToBuild.stream().map(buildTarget -> {
try {
return getRuleResolver().requireRule(buildTarget);
} catch (NoSuchBuildTargetException e) {
throw new HumanReadableException("No build rule found for target %s", buildTarget);
}
}).collect(MoreCollectors.toImmutableSet()));
// Calculate and post the number of rules that need to be built.
int numRules = buildEngine.getNumRulesToBuild(rulesToBuild);
getExecutionContext().getBuckEventBus().post(BuildEvent.ruleCountCalculated(targetsToBuild, numRules));
// Setup symlinks required when configuring the output path.
createConfiguredBuckOutSymlinks();
List<ListenableFuture<BuildResult>> futures = rulesToBuild.stream()
    .map(rule -> buildEngine.build(buildContext, executionContext, rule))
    .collect(MoreCollectors.toImmutableList());
// Get the Future representing the build and then block until everything is built.
ListenableFuture<List<BuildResult>> buildFuture = Futures.allAsList(futures);
List<BuildResult> results;
try {
results = buildFuture.get();
if (!isKeepGoing) {
for (BuildResult result : results) {
Throwable thrown = result.getFailure();
if (thrown != null) {
throw new ExecutionException(thrown);
}
}
}
} catch (ExecutionException | InterruptedException | RuntimeException e) {
Throwable t = Throwables.getRootCause(e);
if (e instanceof InterruptedException || t instanceof InterruptedException || t instanceof ClosedByInterruptException) {
try {
buildFuture.cancel(true);
} catch (CancellationException ignored) {
// Rethrow original InterruptedException instead.
}
Thread.currentThread().interrupt();
}
throw e;
}
// Insertion order matters
LinkedHashMap<BuildRule, Optional<BuildResult>> resultBuilder = new LinkedHashMap<>();
Preconditions.checkState(rulesToBuild.size() == results.size());
for (int i = 0, len = rulesToBuild.size(); i < len; i++) {
BuildRule rule = rulesToBuild.get(i);
resultBuilder.put(rule, Optional.ofNullable(results.get(i)));
}
return BuildExecutionResult.builder()
    .setFailures(FluentIterable.from(results).filter(input -> input.getSuccess() == null))
    .setResults(resultBuilder)
    .build();
}
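The part of this method that concerns ClosedByInterruptException is the catch block: since the exception is an IOException raised when a thread blocked on a channel is interrupted, Buck inspects the root cause of the wrapped exception rather than its outermost type, cancels the outstanding work, and restores the interrupt flag. Below is a minimal, self-contained sketch of that pattern using plain java.util.concurrent futures instead of Guava; the class and method names are illustrative, not Buck's.

import java.nio.channels.ClosedByInterruptException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

final class InterruptAwareWait {

    // Walk the cause chain to the root, as Guava's Throwables.getRootCause does.
    private static Throwable rootCause(Throwable t) {
        while (t.getCause() != null) {
            t = t.getCause();
        }
        return t;
    }

    // Block on the future; if the failure is interrupt-shaped (a direct
    // InterruptedException, or a ClosedByInterruptException at the root of an
    // ExecutionException), cancel the remaining work and restore the thread's
    // interrupt flag before rethrowing.
    static <T> T getInterruptibly(Future<T> future)
            throws ExecutionException, InterruptedException {
        try {
            return future.get();
        } catch (ExecutionException | InterruptedException | RuntimeException e) {
            Throwable root = rootCause(e);
            if (e instanceof InterruptedException
                    || root instanceof InterruptedException
                    || root instanceof ClosedByInterruptException) {
                future.cancel(true);
                Thread.currentThread().interrupt();
            }
            throw e;
        }
    }
}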
Use of java.nio.channels.ClosedByInterruptException in project buck by facebook.
The class Main, method runMainWithExitCode.
/**
* @param buildId an identifier for this command execution.
* @param context an optional NGContext that is present if running inside a Nailgun server.
* @param initTimestamp Value of System.nanoTime() when process got main()/nailMain() invoked.
* @param unexpandedCommandLineArgs command line arguments
* @return an exit code or {@code null} if this is a process that should not exit
*/
@SuppressWarnings("PMD.PrematureDeclaration")
public int runMainWithExitCode(
    BuildId buildId,
    Path projectRoot,
    Optional<NGContext> context,
    ImmutableMap<String, String> clientEnvironment,
    CommandMode commandMode,
    WatchmanWatcher.FreshInstanceAction watchmanFreshInstanceAction,
    final long initTimestamp,
    String... unexpandedCommandLineArgs) throws IOException, InterruptedException {
String[] args = BuckArgsMethods.expandAtFiles(unexpandedCommandLineArgs);
// Parse the command line args.
BuckCommand command = new BuckCommand();
AdditionalOptionsCmdLineParser cmdLineParser = new AdditionalOptionsCmdLineParser(command);
try {
cmdLineParser.parseArgument(args);
} catch (CmdLineException e) {
// Can't go through the console for prettification since that needs the BuckConfig, and that
// needs to be created with the overrides, which are parsed from the command line here, which
requires the console to print the message that parsing has failed. So just write to stderr
// and be done with it.
stdErr.println(e.getLocalizedMessage());
stdErr.println("For help see 'buck --help'.");
return 1;
}
{
// Return help strings fast if the command is a help request.
OptionalInt result = command.runHelp(stdErr);
if (result.isPresent()) {
return result.getAsInt();
}
}
// Setup logging.
if (commandMode.isLoggingEnabled()) {
// Reset logging each time we run a command while daemonized.
// This will cause us to write a new log per command.
LOG.debug("Rotating log.");
LogConfig.flushLogs();
LogConfig.setupLogging(command.getLogConfig());
if (LOG.isDebugEnabled()) {
Long gitCommitTimestamp = Long.getLong("buck.git_commit_timestamp");
String buildDateStr;
if (gitCommitTimestamp == null) {
buildDateStr = "(unknown)";
} else {
buildDateStr = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss Z", Locale.US).format(new Date(TimeUnit.SECONDS.toMillis(gitCommitTimestamp)));
}
String buildRev = System.getProperty("buck.git_commit", "(unknown)");
LOG.debug("Starting up (build date %s, rev %s), args: %s", buildDateStr, buildRev, Arrays.toString(args));
LOG.debug("System properties: %s", System.getProperties());
}
}
// Setup filesystem and buck config.
Path canonicalRootPath = projectRoot.toRealPath().normalize();
Config config = Configs.createDefaultConfig(canonicalRootPath, command.getConfigOverrides().getForCell(RelativeCellName.ROOT_CELL_NAME));
ProjectFilesystem filesystem = new ProjectFilesystem(canonicalRootPath, config);
DefaultCellPathResolver cellPathResolver = new DefaultCellPathResolver(filesystem.getRootPath(), config);
BuckConfig buckConfig = new BuckConfig(config, filesystem, architecture, platform, clientEnvironment, cellPathResolver);
ImmutableSet<Path> projectWatchList = ImmutableSet.<Path>builder()
    .add(canonicalRootPath)
    .addAll(buckConfig.getView(ParserConfig.class).getWatchCells()
        ? cellPathResolver.getTransitivePathMapping().values()
        : ImmutableList.of())
    .build();
Optional<ImmutableList<String>> allowedJavaSpecificationVersions = buckConfig.getAllowedJavaSpecificationVersions();
if (allowedJavaSpecificationVersions.isPresent()) {
String specificationVersion = System.getProperty("java.specification.version");
boolean javaSpecificationVersionIsAllowed = allowedJavaSpecificationVersions.get().contains(specificationVersion);
if (!javaSpecificationVersionIsAllowed) {
throw new HumanReadableException(
    "Current Java version '%s' is not in the allowed java specification versions:\n%s",
    specificationVersion,
    Joiner.on(", ").join(allowedJavaSpecificationVersions.get()));
}
}
// Setup the console.
Verbosity verbosity = VerbosityParser.parse(args);
Optional<String> color;
if (context.isPresent() && (context.get().getEnv() != null)) {
String colorString = context.get().getEnv().getProperty(BUCKD_COLOR_DEFAULT_ENV_VAR);
color = Optional.ofNullable(colorString);
} else {
color = Optional.empty();
}
final Console console = new Console(verbosity, stdOut, stdErr, buckConfig.createAnsi(color));
// No more early outs: if this command is not read only, acquire the command semaphore to
// become the only executing read/write command.
// This must happen immediately before the try block to ensure that the semaphore is released.
boolean commandSemaphoreAcquired = false;
boolean shouldCleanUpTrash = false;
if (!command.isReadOnly()) {
commandSemaphoreAcquired = commandSemaphore.tryAcquire();
if (!commandSemaphoreAcquired) {
LOG.warn("Buck server was busy executing a command. Maybe retrying later will help.");
return BUSY_EXIT_CODE;
}
}
try {
if (commandSemaphoreAcquired) {
commandSemaphoreNgClient = context;
}
if (!command.isReadOnly()) {
Optional<String> currentVersion = filesystem.readFileIfItExists(filesystem.getBuckPaths().getCurrentVersionFile());
BuckPaths unconfiguredPaths = filesystem.getBuckPaths().withConfiguredBuckOut(filesystem.getBuckPaths().getBuckOut());
if (!currentVersion.isPresent()
    || !currentVersion.get().equals(BuckVersion.getVersion())
    || (filesystem.exists(unconfiguredPaths.getGenDir(), LinkOption.NOFOLLOW_LINKS)
        && (filesystem.isSymLink(unconfiguredPaths.getGenDir()) ^ buckConfig.getBuckOutCompatLink()))) {
// Migrate any version-dependent directories (which might be huge) to a trash directory
// so we can delete it asynchronously after the command is done.
moveToTrash(
    filesystem,
    console,
    buildId,
    filesystem.getBuckPaths().getAnnotationDir(),
    filesystem.getBuckPaths().getGenDir(),
    filesystem.getBuckPaths().getScratchDir(),
    filesystem.getBuckPaths().getResDir());
shouldCleanUpTrash = true;
filesystem.mkdirs(filesystem.getBuckPaths().getCurrentVersionFile().getParent());
filesystem.writeContentsToPath(BuckVersion.getVersion(), filesystem.getBuckPaths().getCurrentVersionFile());
}
}
AndroidBuckConfig androidBuckConfig = new AndroidBuckConfig(buckConfig, platform);
AndroidDirectoryResolver androidDirectoryResolver = new DefaultAndroidDirectoryResolver(
    filesystem.getRootPath().getFileSystem(),
    clientEnvironment,
    androidBuckConfig.getBuildToolsVersion(),
    androidBuckConfig.getNdkVersion());
ProcessExecutor processExecutor = new DefaultProcessExecutor(console);
Clock clock;
boolean enableThreadCpuTime = buckConfig.getBooleanValue("build", "enable_thread_cpu_time", true);
if (BUCKD_LAUNCH_TIME_NANOS.isPresent()) {
long nanosEpoch = Long.parseLong(BUCKD_LAUNCH_TIME_NANOS.get(), 10);
LOG.verbose("Using nanos epoch: %d", nanosEpoch);
clock = new NanosAdjustedClock(nanosEpoch, enableThreadCpuTime);
} else {
clock = new DefaultClock(enableThreadCpuTime);
}
ParserConfig parserConfig = buckConfig.getView(ParserConfig.class);
try (Watchman watchman = buildWatchman(context, parserConfig, projectWatchList, clientEnvironment, console, clock)) {
final boolean isDaemon = context.isPresent() && (watchman != Watchman.NULL_WATCHMAN);
if (!isDaemon && shouldCleanUpTrash) {
// Clean up the trash on a background thread if this was a
// non-buckd read-write command. (We don't bother waiting
// for it to complete; the thread is a daemon thread which
// will just be terminated at shutdown time.)
TRASH_CLEANER.startCleaningDirectory(filesystem.getBuckPaths().getTrashDir());
}
KnownBuildRuleTypesFactory factory = new KnownBuildRuleTypesFactory(processExecutor, androidDirectoryResolver);
Cell rootCell = CellProvider.createForLocalBuild(filesystem, watchman, buckConfig, command.getConfigOverrides(), factory).getCellByPath(filesystem.getRootPath());
int exitCode;
ImmutableList<BuckEventListener> eventListeners = ImmutableList.of();
ExecutionEnvironment executionEnvironment = new DefaultExecutionEnvironment(clientEnvironment, System.getProperties());
ImmutableList.Builder<ProjectFileHashCache> allCaches = ImmutableList.builder();
// Build up the hash cache, which is a collection of the stateful cell cache and some
// per-run caches.
//
// TODO(Coneko, ruibm, andrewjcg): Determine whether we can use the existing filesystem
// object that is in scope instead of creating a new rootCellProjectFilesystem. The primary
// difference appears to be that filesystem is created with a Config that is used to produce
// ImmutableSet<PathOrGlobMatcher> and BuckPaths for the ProjectFilesystem, whereas this one
// uses the defaults.
ProjectFilesystem rootCellProjectFilesystem = ProjectFilesystem.createNewOrThrowHumanReadableException(rootCell.getFilesystem().getRootPath());
if (isDaemon) {
allCaches.addAll(getFileHashCachesFromDaemon(rootCell));
} else {
getTransitiveCells(rootCell).stream().map(cell -> DefaultFileHashCache.createDefaultFileHashCache(cell.getFilesystem())).forEach(allCaches::add);
allCaches.add(DefaultFileHashCache.createBuckOutFileHashCache(rootCellProjectFilesystem, rootCell.getFilesystem().getBuckPaths().getBuckOut()));
}
// A cache which caches hashes of cell-relative paths which may have been ignored by
// the main cell cache, and only serves to prevent rehashing the same file multiple
// times in a single run.
allCaches.add(DefaultFileHashCache.createDefaultFileHashCache(rootCellProjectFilesystem));
allCaches.addAll(DefaultFileHashCache.createOsRootDirectoriesCaches());
StackedFileHashCache fileHashCache = new StackedFileHashCache(allCaches.build());
Optional<WebServer> webServer = getWebServerIfDaemon(context, rootCell);
Optional<ConcurrentMap<String, WorkerProcessPool>> persistentWorkerPools = getPersistentWorkerPoolsIfDaemon(context, rootCell);
TestConfig testConfig = new TestConfig(buckConfig);
ArtifactCacheBuckConfig cacheBuckConfig = new ArtifactCacheBuckConfig(buckConfig);
ExecutorService diskIoExecutorService = MostExecutors.newSingleThreadExecutor("Disk I/O");
ListeningExecutorService httpWriteExecutorService = getHttpWriteExecutorService(cacheBuckConfig);
ScheduledExecutorService counterAggregatorExecutor = Executors.newSingleThreadScheduledExecutor(new CommandThreadFactory("CounterAggregatorThread"));
VersionControlStatsGenerator vcStatsGenerator;
// Eventually, we'll want to allow websocket and/or nailgun clients to specify locale
// when connecting. For now, we'll use the default from the server environment.
Locale locale = Locale.getDefault();
// Create a cached thread pool for cpu intensive tasks
Map<ExecutorPool, ListeningExecutorService> executors = new HashMap<>();
executors.put(ExecutorPool.CPU, listeningDecorator(Executors.newCachedThreadPool()));
// Create a thread pool for network I/O tasks
executors.put(ExecutorPool.NETWORK, newDirectExecutorService());
executors.put(ExecutorPool.PROJECT, listeningDecorator(MostExecutors.newMultiThreadExecutor("Project", buckConfig.getNumThreads())));
// Create and register the event buses that should listen to broadcast events.
// If the build doesn't have a daemon create a new instance.
BroadcastEventListener broadcastEventListener = getBroadcastEventListener(isDaemon, rootCell, objectMapper);
// The order of resources in the try-with-resources block is important: the BuckEventBus
// must be the last resource, so that it is closed first and can deliver its queued events
// to the other resources before they are closed.
InvocationInfo invocationInfo = InvocationInfo.of(buildId, isSuperConsoleEnabled(console), isDaemon, command.getSubCommandNameForLogging(), filesystem.getBuckPaths().getLogDir());
try (GlobalStateManager.LoggerIsMappedToThreadScope loggerThreadMappingScope = GlobalStateManager.singleton().setupLoggers(invocationInfo, console.getStdErr(), stdErr, verbosity);
AbstractConsoleEventBusListener consoleListener = createConsoleEventListener(
    clock,
    new SuperConsoleConfig(buckConfig),
    console,
    testConfig.getResultSummaryVerbosity(),
    executionEnvironment,
    webServer,
    locale,
    filesystem.getBuckPaths().getLogDir().resolve("test.log"));
AsyncCloseable asyncCloseable = new AsyncCloseable(diskIoExecutorService);
BuckEventBus buildEventBus = new BuckEventBus(clock, buildId);
BroadcastEventListener.BroadcastEventBusClosable broadcastEventBusClosable = broadcastEventListener.addEventBus(buildEventBus);
// stderr.
Closeable logErrorToEventBus = loggerThreadMappingScope.setWriter(createWriterForConsole(consoleListener));
// NOTE: This will only run during the lifetime of the process and will flush on close.
CounterRegistry counterRegistry = new CounterRegistryImpl(counterAggregatorExecutor, buildEventBus, buckConfig.getCountersFirstFlushIntervalMillis(), buckConfig.getCountersFlushIntervalMillis());
PerfStatsTracking perfStatsTracking = new PerfStatsTracking(buildEventBus, invocationInfo);
ProcessTracker processTracker = buckConfig.isProcessTrackerEnabled() && platform != Platform.WINDOWS
    ? new ProcessTracker(buildEventBus, invocationInfo, isDaemon, buckConfig.isProcessTrackerDeepEnabled())
    : null) {
LOG.debug(invocationInfo.toLogLine(args));
buildEventBus.register(HANG_MONITOR.getHangMonitor());
ArtifactCaches artifactCacheFactory = new ArtifactCaches(cacheBuckConfig, buildEventBus, filesystem, executionEnvironment.getWifiSsid(), httpWriteExecutorService, Optional.of(asyncCloseable));
ProgressEstimator progressEstimator = new ProgressEstimator(filesystem.resolve(filesystem.getBuckPaths().getBuckOut()).resolve(ProgressEstimator.PROGRESS_ESTIMATIONS_JSON), buildEventBus, objectMapper);
consoleListener.setProgressEstimator(progressEstimator);
BuildEnvironmentDescription buildEnvironmentDescription = getBuildEnvironmentDescription(executionEnvironment, buckConfig);
Iterable<BuckEventListener> commandEventListeners = command.getSubcommand().isPresent()
    ? command.getSubcommand().get().getEventListeners(invocationInfo.getLogDirectoryPath(), filesystem)
    : ImmutableList.of();
Supplier<BuckEventListener> missingSymbolsListenerSupplier = () -> {
return MissingSymbolsHandler.createListener(
    rootCell.getFilesystem(),
    rootCell.getKnownBuildRuleTypes().getAllDescriptions(),
    rootCell.getBuckConfig(),
    buildEventBus,
    console,
    buckConfig.getView(JavaBuckConfig.class).getDefaultJavacOptions(),
    clientEnvironment);
};
eventListeners = addEventListeners(buildEventBus, rootCell.getFilesystem(), invocationInfo, rootCell.getBuckConfig(), webServer, clock, consoleListener, missingSymbolsListenerSupplier, counterRegistry, commandEventListeners);
if (commandMode == CommandMode.RELEASE && buckConfig.isPublicAnnouncementsEnabled()) {
PublicAnnouncementManager announcementManager = new PublicAnnouncementManager(
    clock,
    buildEventBus,
    consoleListener,
    buckConfig.getRepository().orElse("unknown"),
    new RemoteLogBuckConfig(buckConfig),
    executors.get(ExecutorPool.CPU));
announcementManager.getAndPostAnnouncements();
}
// This needs to be after the registration of the event listener so they can pick it up.
if (watchmanFreshInstanceAction == WatchmanWatcher.FreshInstanceAction.NONE) {
buildEventBus.post(DaemonEvent.newDaemonInstance());
}
if (command.subcommand instanceof AbstractCommand) {
AbstractCommand subcommand = (AbstractCommand) command.subcommand;
VersionControlBuckConfig vcBuckConfig = new VersionControlBuckConfig(buckConfig);
if (!commandMode.equals(CommandMode.TEST) && (subcommand.isSourceControlStatsGatheringEnabled() || vcBuckConfig.shouldGenerateStatistics())) {
vcStatsGenerator = new VersionControlStatsGenerator(
    diskIoExecutorService,
    new DefaultVersionControlCmdLineInterfaceFactory(
        rootCell.getFilesystem().getRootPath(),
        new PrintStreamProcessExecutorFactory(),
        vcBuckConfig,
        buckConfig.getEnvironment()),
    buildEventBus);
vcStatsGenerator.generateStatsAsync();
}
}
ImmutableList<String> remainingArgs = args.length > 1 ? ImmutableList.copyOf(Arrays.copyOfRange(args, 1, args.length)) : ImmutableList.of();
CommandEvent.Started startedEvent = CommandEvent.started(args.length > 0 ? args[0] : "", remainingArgs, isDaemon, getBuckPID());
buildEventBus.post(startedEvent);
// Create or get Parser and invalidate cached command parameters.
Parser parser = null;
VersionedTargetGraphCache versionedTargetGraphCache = null;
ActionGraphCache actionGraphCache = null;
Optional<RuleKeyCacheRecycler<RuleKey>> defaultRuleKeyFactoryCacheRecycler = Optional.empty();
if (isDaemon) {
try {
Daemon daemon = getDaemon(rootCell, objectMapper);
WatchmanWatcher watchmanWatcher = createWatchmanWatcher(
    daemon,
    watchman.getProjectWatches(),
    daemon.getFileEventBus(),
    ImmutableSet.<PathOrGlobMatcher>builder()
        .addAll(filesystem.getIgnorePaths())
        .addAll(DEFAULT_IGNORE_GLOBS)
        .build(),
    watchman);
parser = getParserFromDaemon(context, rootCell, startedEvent, buildEventBus, watchmanWatcher, watchmanFreshInstanceAction);
versionedTargetGraphCache = daemon.getVersionedTargetGraphCache();
actionGraphCache = daemon.getActionGraphCache();
if (buckConfig.getRuleKeyCaching()) {
LOG.debug("Using rule key calculation caching");
defaultRuleKeyFactoryCacheRecycler = Optional.of(daemon.getDefaultRuleKeyFactoryCacheRecycler());
}
} catch (WatchmanWatcherException | IOException e) {
buildEventBus.post(ConsoleEvent.warning("Watchman threw an exception while parsing file changes.\n%s", e.getMessage()));
}
}
if (versionedTargetGraphCache == null) {
versionedTargetGraphCache = new VersionedTargetGraphCache();
}
if (actionGraphCache == null) {
actionGraphCache = new ActionGraphCache(broadcastEventListener);
}
if (parser == null) {
TypeCoercerFactory typeCoercerFactory = new DefaultTypeCoercerFactory(objectMapper);
parser = new Parser(broadcastEventListener, rootCell.getBuckConfig().getView(ParserConfig.class), typeCoercerFactory, new ConstructorArgMarshaller(typeCoercerFactory));
}
// Because the Parser is potentially constructed before the CounterRegistry,
// we need to manually register its counters after it's created.
//
// The counters will be unregistered once the counter registry is closed.
counterRegistry.registerCounters(parser.getCounters());
JavaUtilsLoggingBuildListener.ensureLogFileIsWritten(rootCell.getFilesystem());
Optional<ProcessManager> processManager;
if (platform == Platform.WINDOWS) {
processManager = Optional.empty();
} else {
processManager = Optional.of(new PkillProcessManager(processExecutor));
}
Supplier<AndroidPlatformTarget> androidPlatformTargetSupplier = createAndroidPlatformTargetSupplier(androidDirectoryResolver, androidBuckConfig, buildEventBus);
// event-listener.
if (command.subcommand instanceof AbstractCommand) {
AbstractCommand subcommand = (AbstractCommand) command.subcommand;
Optional<Path> eventsOutputPath = subcommand.getEventsOutputPath();
if (eventsOutputPath.isPresent()) {
BuckEventListener listener = new FileSerializationEventBusListener(eventsOutputPath.get(), objectMapper);
buildEventBus.register(listener);
}
}
buildEventBus.post(new BuckInitializationDurationEvent(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - initTimestamp)));
try {
exitCode = command.run(CommandRunnerParams.builder()
    .setConsole(console)
    .setStdIn(stdIn)
    .setCell(rootCell)
    .setAndroidPlatformTargetSupplier(androidPlatformTargetSupplier)
    .setArtifactCacheFactory(artifactCacheFactory)
    .setBuckEventBus(buildEventBus)
    .setParser(parser)
    .setPlatform(platform)
    .setEnvironment(clientEnvironment)
    .setJavaPackageFinder(rootCell.getBuckConfig().getView(JavaBuckConfig.class).createDefaultJavaPackageFinder())
    .setObjectMapper(objectMapper)
    .setClock(clock)
    .setProcessManager(processManager)
    .setPersistentWorkerPools(persistentWorkerPools)
    .setWebServer(webServer)
    .setBuckConfig(buckConfig)
    .setFileHashCache(fileHashCache)
    .setExecutors(executors)
    .setBuildEnvironmentDescription(buildEnvironmentDescription)
    .setVersionedTargetGraphCache(versionedTargetGraphCache)
    .setActionGraphCache(actionGraphCache)
    .setKnownBuildRuleTypesFactory(factory)
    .setInvocationInfo(Optional.of(invocationInfo))
    .setDefaultRuleKeyFactoryCacheRecycler(defaultRuleKeyFactoryCacheRecycler)
    .build());
} catch (InterruptedException | ClosedByInterruptException e) {
exitCode = INTERRUPTED_EXIT_CODE;
buildEventBus.post(CommandEvent.interrupted(startedEvent, INTERRUPTED_EXIT_CODE));
throw e;
}
// Let's avoid an infinite loop
if (exitCode == BUSY_EXIT_CODE) {
// Some loss of info here, but better than looping
exitCode = FAIL_EXIT_CODE;
LOG.error("Buck return with exit code %d which we use to indicate busy status. " + "This is probably propagating an exit code from a sub process or tool. " + "Coercing to %d to avoid retries.", BUSY_EXIT_CODE, FAIL_EXIT_CODE);
}
// Wait for HTTP writes to complete.
closeHttpExecutorService(cacheBuckConfig, Optional.of(buildEventBus), httpWriteExecutorService);
closeExecutorService("CounterAggregatorExecutor", counterAggregatorExecutor, COUNTER_AGGREGATOR_SERVICE_TIMEOUT_SECONDS);
buildEventBus.post(CommandEvent.finished(startedEvent, exitCode));
} catch (Throwable t) {
LOG.debug(t, "Failing build on exception.");
closeHttpExecutorService(cacheBuckConfig, Optional.empty(), httpWriteExecutorService);
closeDiskIoExecutorService(diskIoExecutorService);
flushEventListeners(console, buildId, eventListeners);
throw t;
} finally {
if (commandSemaphoreAcquired) {
commandSemaphoreNgClient = Optional.empty();
BgProcessKiller.disarm();
// Allow another command to execute while outputting traces.
commandSemaphore.release();
commandSemaphoreAcquired = false;
}
if (isDaemon && shouldCleanUpTrash) {
// Clean up the trash in the background if this was a buckd
// read-write command. (We don't bother waiting for it to
// complete; the cleaner will ensure subsequent cleans are
// serialized with this one.)
TRASH_CLEANER.startCleaningDirectory(filesystem.getBuckPaths().getTrashDir());
}
// shut down the cached thread pools
for (ExecutorPool p : executors.keySet()) {
closeExecutorService(p.toString(), executors.get(p), EXECUTOR_SERVICES_TIMEOUT_SECONDS);
}
}
if (context.isPresent() && !rootCell.getBuckConfig().getFlushEventsBeforeExit()) {
// Avoid client exit triggering client disconnection handling.
context.get().in.close();
// Allow nailgun client to exit while outputting traces.
context.get().exit(exitCode);
}
closeDiskIoExecutorService(diskIoExecutorService);
flushEventListeners(console, buildId, eventListeners);
return exitCode;
}
} finally {
if (commandSemaphoreAcquired) {
commandSemaphoreNgClient = Optional.empty();
BgProcessKiller.disarm();
commandSemaphore.release();
}
}
}
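The narrow catch around command.run(...) is the idiom this page collects: InterruptedException and ClosedByInterruptException are handled identically, the latter being the NIO-channel form of "this thread was interrupted mid-I/O". The sketch below shows that mapping in isolation; the exit-code value and all names here are assumptions for illustration, not Buck's actual constants.

import java.nio.channels.ClosedByInterruptException;
import java.util.concurrent.Callable;

final class InterruptToExitCode {
    // Assumed value for illustration; Buck defines its own INTERRUPTED_EXIT_CODE.
    private static final int INTERRUPTED_EXIT_CODE = 130;

    // Run a command, treating both interrupt spellings the same way before
    // propagating, so outer layers can still finish their own cleanup.
    static int run(Callable<Integer> command) throws Exception {
        try {
            return command.call();
        } catch (InterruptedException | ClosedByInterruptException e) {
            // Report the interruption (Buck posts a CommandEvent.interrupted here),
            // then rethrow; the recorded code is what the process will exit with.
            System.err.println("Interrupted; exit code " + INTERRUPTED_EXIT_CODE);
            throw e;
        }
    }
}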
Use of java.nio.channels.ClosedByInterruptException in project storm by apache.
The class HealthCheck, method processScript.
public static String processScript(Map conf, String script) {
Thread interruptThread = null;
try {
Process process = Runtime.getRuntime().exec(script);
final long timeout = (long) (conf.get(Config.STORM_HEALTH_CHECK_TIMEOUT_MS));
final Thread curThread = Thread.currentThread();
// Watchdog: interrupt the waiting thread once the timeout elapses.
interruptThread = new Thread(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(timeout);
curThread.interrupt();
} catch (InterruptedException e) {
// Ignored
}
}
});
interruptThread.start();
process.waitFor();
interruptThread.interrupt();
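// Clear any interrupt the watchdog may have delivered just as waitFor()
// returned (Thread.interrupted() is static, so despite the instance
// receiver this clears the current thread's interrupt flag).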
curThread.interrupted();
if (process.exitValue() == 0) {
String str;
InputStream stdin = process.getInputStream();
BufferedReader reader = new BufferedReader(new InputStreamReader(stdin));
while ((str = reader.readLine()) != null) {
if (str.startsWith("ERROR")) {
return FAILED;
}
}
return SUCCESS;
}
return FAILED_WITH_EXIT_CODE;
} catch (InterruptedException | ClosedByInterruptException e) {
LOG.warn("Script: {} timed out.", script);
return TIMEOUT;
} catch (Exception e) {
LOG.warn("Script failed with exception: ", e);
return FAILED_WITH_EXIT_CODE;
} finally {
if (interruptThread != null)
interruptThread.interrupt();
}
}
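The watchdog thread above exists only to interrupt the thread blocked in waitFor() once the timeout elapses, and the resulting interrupt is then read as "timed out". Since Java 8, Process.waitFor(timeout, unit) expresses the same idea without a helper thread; the sketch below shows that alternative, with method names and return strings that are illustrative rather than Storm's.

import java.io.IOException;
import java.util.concurrent.TimeUnit;

final class ScriptTimeoutSketch {
    // Run a script, treating a timeout as its own outcome rather than an error.
    static String runWithTimeout(String script, long timeoutMs) {
        try {
            Process process = Runtime.getRuntime().exec(script);
            if (!process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
                // Unlike the watchdog version, this also stops the script itself.
                process.destroyForcibly();
                return "timeout";
            }
            return process.exitValue() == 0 ? "success" : "failed_with_exit_code";
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return "timeout";
        } catch (IOException e) {
            return "failed_with_exit_code";
        }
    }
}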
Use of java.nio.channels.ClosedByInterruptException in project graylog2-server by Graylog2.
The class KafkaJournal, method read.
public List<JournalReadEntry> read(long readOffset, long requestedMaximumCount) {
// Always read at least one!
final long maximumCount = Math.max(1, requestedMaximumCount);
long maxOffset = readOffset + maximumCount;
if (shuttingDown) {
return Collections.emptyList();
}
final List<JournalReadEntry> messages = new ArrayList<>(Ints.saturatedCast(maximumCount));
try (Timer.Context ignored = readTime.time()) {
final long logStartOffset = getLogStartOffset();
if (readOffset < logStartOffset) {
LOG.info("Read offset {} before start of log at {}, starting to read from the beginning of the journal.", readOffset, logStartOffset);
readOffset = logStartOffset;
maxOffset = readOffset + maximumCount;
}
LOG.debug("Requesting to read a maximum of {} messages (or 5MB) from the journal, offset interval [{}, {})", maximumCount, readOffset, maxOffset);
// TODO benchmark and make read-ahead strategy configurable for performance tuning
final MessageSet messageSet = kafkaLog.read(readOffset, 5 * 1024 * 1024, Option.<Object>apply(maxOffset)).messageSet();
final Iterator<MessageAndOffset> iterator = messageSet.iterator();
long firstOffset = Long.MIN_VALUE;
long lastOffset = Long.MIN_VALUE;
long totalBytes = 0;
while (iterator.hasNext()) {
final MessageAndOffset messageAndOffset = iterator.next();
if (firstOffset == Long.MIN_VALUE)
firstOffset = messageAndOffset.offset();
// always remember the last seen offset for debug purposes below
lastOffset = messageAndOffset.offset();
final byte[] payloadBytes = ByteBufferUtils.readBytes(messageAndOffset.message().payload());
if (LOG.isTraceEnabled()) {
final byte[] keyBytes = ByteBufferUtils.readBytes(messageAndOffset.message().key());
LOG.trace("Read message {} contains {}", bytesToHex(keyBytes), bytesToHex(payloadBytes));
}
totalBytes += payloadBytes.length;
messages.add(new JournalReadEntry(payloadBytes, messageAndOffset.offset()));
// remember where to read from
nextReadOffset = messageAndOffset.nextOffset();
}
if (messages.isEmpty()) {
LOG.debug("No messages available to read for offset interval [{}, {}).", readOffset, maxOffset);
} else {
LOG.debug("Read {} messages, total payload size {}, from journal, offset interval [{}, {}], requested read at {}", messages.size(), totalBytes, firstOffset, lastOffset, readOffset);
}
} catch (OffsetOutOfRangeException e) {
// This is fine, the reader tries to read faster than the writer committed data. Next read will get the data.
LOG.debug("Offset out of range, no messages available starting at offset {}", readOffset);
} catch (Exception e) {
// sigh.
if (shuttingDown) {
LOG.debug("Caught exception during shutdown, ignoring it because we might have been blocked on a read.");
return Collections.emptyList();
}
//noinspection ConstantConditions
if (e instanceof ClosedByInterruptException) {
LOG.debug("Interrupted while reading from journal, during shutdown this is harmless and ignored.", e);
} else {
throw e;
}
}
readMessages.mark(messages.size());
return messages;
}
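Here ClosedByInterruptException is not an error at all: during shutdown the journal reader is interrupted while blocked on Kafka's FileChannel, so the exception is simply how the blocked read ends. A minimal sketch of that shutdown-tolerant shape, with illustrative names:

import java.nio.channels.ClosedByInterruptException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;

final class ShutdownTolerantRead {
    // Perform a blocking read; during shutdown, or when the read was broken by
    // an interrupt on the underlying channel, return an empty batch instead of
    // failing, so the caller's drain loop can finish cleanly.
    static <T> List<T> read(Callable<List<T>> blockingRead, boolean shuttingDown) throws Exception {
        try {
            return blockingRead.call();
        } catch (Exception e) {
            if (shuttingDown || e instanceof ClosedByInterruptException) {
                return Collections.emptyList();
            }
            throw e;
        }
    }
}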
Use of java.nio.channels.ClosedByInterruptException in project intellij-plugins by JetBrains.
The class LibraryManager, method sortLibraries.
@NotNull
private SortResult sortLibraries(LibrarySorter sorter, LibraryCollector collector, Condition<String> isExternal, String key, boolean isSdk) throws InitException {
final List<Library> libraries = isSdk ? collector.sdkLibraries : collector.externalLibraries;
try {
final int id = data.librarySets.enumerate(key);
SortResult result = data.librarySets.get(key);
if (result == null) {
result = sorter.sort(libraries, new File(appDir, LibrariesData.NAME_PREFIX + Integer.toString(id) + SWF_EXTENSION), isExternal, isSdk);
data.librarySets.put(key, result);
} else {
final String[] libraryPaths = result.libraryPaths;
final List<Library> filteredLibraries = new ArrayList<>(libraryPaths.length);
for (Library library : libraries) {
if (ArrayUtil.indexOf(libraryPaths, library.getFile().getPath()) != -1) {
filteredLibraries.add(library);
}
}
result = new SortResult(result.definitionMap, filteredLibraries);
}
result.id = id;
return result;
} catch (ClosedByInterruptException e) {
throw new InitException(e);
} catch (Throwable e) {
String technicalMessage = "Flex SDK " + collector.getFlexSdkVersion();
final Attachment[] attachments = new Attachment[libraries.size()];
try {
for (int i = 0, librariesSize = libraries.size(); i < librariesSize; i++) {
Library library = libraries.get(i);
technicalMessage += " " + library.getFile().getPath();
attachments[i] = AttachmentFactory.createAttachment(library.getFile());
}
} catch (Throwable innerE) {
technicalMessage += " Cannot collect library catalog files due to " + ExceptionUtil.getThrowableText(innerE);
}
throw new InitException(e, "error.sort.libraries", attachments, technicalMessage);
}
}
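This example takes the opposite stance from the Kafka journal: interruption is still a failure, but a cheap one. ClosedByInterruptException gets a thin wrapper so callers can tell cancellation from a real fault, while any other Throwable pays for diagnostics collection first. A sketch of that two-tier catch under assumed names; the wrapper exception type is a stand-in for the plugin's InitException.

import java.nio.channels.ClosedByInterruptException;
import java.util.concurrent.Callable;

final class TwoTierCatch {
    // Cancellation gets a cheap wrapper; real failures get context attached.
    static <T> T callWithDiagnostics(Callable<T> work, String context) throws Exception {
        try {
            return work.call();
        } catch (ClosedByInterruptException e) {
            // Interrupt-driven cancellation: no point gathering attachments.
            throw new IllegalStateException("cancelled", e);
        } catch (Exception e) {
            // Genuine failure: spend the effort to make it debuggable.
            throw new IllegalStateException("failed while " + context, e);
        }
    }
}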