Use of com.google.devtools.build.lib.events.EventHandler in project bazel by bazelbuild.
Class WorkerSpawnStrategy, method actuallyExec:
private void actuallyExec(
    Spawn spawn,
    ActionExecutionContext actionExecutionContext,
    AtomicReference<Class<? extends SpawnActionContext>> writeOutputFiles)
    throws ExecException, InterruptedException {
  Executor executor = actionExecutionContext.getExecutor();
  EventHandler eventHandler = executor.getEventHandler();
  if (executor.reportsSubcommands()) {
    executor.reportSubcommand(spawn);
  }
  // We assume that the spawn to be executed always gets at least one @flagfile.txt or
  // --flagfile=flagfile.txt argument, which contains the flags related to the work itself (as
  // opposed to start-up options for the executed tool). Thus, we can extract those elements from
  // its args and put them into the WorkRequest instead.
  List<String> flagfiles = new ArrayList<>();
  List<String> startupArgs = new ArrayList<>();
  for (String arg : spawn.getArguments()) {
    if (FLAG_FILE_PATTERN.matcher(arg).matches()) {
      flagfiles.add(arg);
    } else {
      startupArgs.add(arg);
    }
  }
  if (flagfiles.isEmpty()) {
    throw new UserExecException(
        String.format(ERROR_MESSAGE_PREFIX + REASON_NO_FLAGFILE, spawn.getMnemonic()));
  }
  if (Iterables.isEmpty(spawn.getToolFiles())) {
    throw new UserExecException(
        String.format(ERROR_MESSAGE_PREFIX + REASON_NO_TOOLS, spawn.getMnemonic()));
  }
  FileOutErr outErr = actionExecutionContext.getFileOutErr();
  ImmutableList<String> args =
      ImmutableList.<String>builder()
          .addAll(startupArgs)
          .add("--persistent_worker")
          .addAll(
              MoreObjects.firstNonNull(
                  extraFlags.get(spawn.getMnemonic()), ImmutableList.<String>of()))
          .build();
  ImmutableMap<String, String> env = spawn.getEnvironment();
  try {
    ActionInputFileCache inputFileCache = actionExecutionContext.getActionInputFileCache();
    HashCode workerFilesHash =
        WorkerFilesHash.getWorkerFilesHash(spawn.getToolFiles(), actionExecutionContext);
    Map<PathFragment, Path> inputFiles =
        new SpawnHelpers(execRoot).getMounts(spawn, actionExecutionContext);
    Set<PathFragment> outputFiles = SandboxHelpers.getOutputFiles(spawn);
    WorkerKey key =
        new WorkerKey(
            args,
            env,
            execRoot,
            spawn.getMnemonic(),
            workerFilesHash,
            inputFiles,
            outputFiles,
            writeOutputFiles != null);
    WorkRequest.Builder requestBuilder = WorkRequest.newBuilder();
    for (String flagfile : flagfiles) {
      expandArgument(requestBuilder, flagfile);
    }
    List<ActionInput> inputs =
        ActionInputHelper.expandArtifacts(
            spawn.getInputFiles(), actionExecutionContext.getArtifactExpander());
    for (ActionInput input : inputs) {
      byte[] digestBytes = inputFileCache.getDigest(input);
      ByteString digest;
      if (digestBytes == null) {
        digest = ByteString.EMPTY;
      } else {
        digest = ByteString.copyFromUtf8(HashCode.fromBytes(digestBytes).toString());
      }
      requestBuilder.addInputsBuilder().setPath(input.getExecPathString()).setDigest(digest).build();
    }
    WorkResponse response =
        execInWorker(eventHandler, key, requestBuilder.build(), maxRetries, writeOutputFiles);
    outErr.getErrorStream().write(response.getOutputBytes().toByteArray());
    if (response.getExitCode() != 0) {
      throw new UserExecException(
          String.format(
              "Worker process sent response with exit code: %d.", response.getExitCode()));
    }
  } catch (IOException e) {
    String message =
        CommandFailureUtils.describeCommandFailure(
            verboseFailures, spawn.getArguments(), env, execRoot.getPathString());
    throw new UserExecException(message, e);
  }
}
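In this snippet the EventHandler is obtained from the Executor and threaded through to execInWorker, which can use it to surface worker problems (for example, retried failures) as warnings rather than aborting the build. A minimal sketch of that reporting pattern is shown below; the class and method names are illustrative only and are not part of Bazel, and the message text is an assumption.

// Hypothetical sketch, not the actual execInWorker implementation: surfacing a retried
// worker failure to the user through the EventHandler the strategy was handed.
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.events.EventHandler;

final class WorkerRetryReporter {
  private WorkerRetryReporter() {}

  // Reports one retry attempt as a warning; names and wording are illustrative.
  static void reportRetry(EventHandler eventHandler, String mnemonic, int attempt, Exception cause) {
    eventHandler.handle(
        Event.warn(mnemonic + " worker failed (attempt " + attempt + "), retrying: " + cause));
  }
}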
Use of com.google.devtools.build.lib.events.EventHandler in project bazel by bazelbuild.
Class RemoteSpawnStrategy, method exec:
/** Executes the given {@code spawn}. */
@Override
public void exec(Spawn spawn, ActionExecutionContext actionExecutionContext)
    throws ExecException, InterruptedException {
  ActionKey actionKey = null;
  String mnemonic = spawn.getMnemonic();
  Executor executor = actionExecutionContext.getExecutor();
  EventHandler eventHandler = executor.getEventHandler();
  RemoteActionCache actionCache = null;
  RemoteWorkExecutor workExecutor = null;
  if (spawn.isRemotable()) {
    // Initialize the remote cache and the remote work executor. They are created per
    // action to enable server-side parallelism (need a different gRPC channel per action).
    try {
      if (ConcurrentMapFactory.isRemoteCacheOptions(options)) {
        actionCache = new ConcurrentMapActionCache(ConcurrentMapFactory.create(options));
      } else if (GrpcActionCache.isRemoteCacheOptions(options)) {
        actionCache = new GrpcActionCache(options);
      }
      if (actionCache != null && RemoteWorkExecutor.isRemoteExecutionOptions(options)) {
        workExecutor = new RemoteWorkExecutor(options);
      }
    } catch (InvalidConfigurationException e) {
      eventHandler.handle(Event.warn(e.toString()));
    }
  }
  if (!spawn.isRemotable() || actionCache == null) {
    standaloneStrategy.exec(spawn, actionExecutionContext);
    return;
  }
  if (workExecutor == null) {
    execLocally(spawn, actionExecutionContext, actionCache, actionKey);
    return;
  }
  if (executor.reportsSubcommands()) {
    executor.reportSubcommand(spawn);
  }
  executor
      .getEventBus()
      .post(ActionStatusMessage.runningStrategy(spawn.getResourceOwner(), "remote"));
  try {
    // Temporary hack: the TreeNodeRepository should be created and maintained upstream!
    TreeNodeRepository repository = new TreeNodeRepository(execRoot);
    List<ActionInput> inputs =
        ActionInputHelper.expandArtifacts(
            spawn.getInputFiles(), actionExecutionContext.getArtifactExpander());
    TreeNode inputRoot = repository.buildFromActionInputs(inputs);
    repository.computeMerkleDigests(inputRoot);
    Command command = buildCommand(spawn.getArguments(), spawn.getEnvironment());
    Action action =
        buildAction(
            spawn.getOutputFiles(),
            ContentDigests.computeDigest(command),
            repository.getMerkleDigest(inputRoot));
    // Look up the action cache, and reuse the action output if it is found.
    actionKey = ContentDigests.computeActionKey(action);
    ActionResult result =
        this.options.remoteAcceptCached ? actionCache.getCachedActionResult(actionKey) : null;
    boolean acceptCachedResult = this.options.remoteAcceptCached;
    if (result != null) {
      // A cached result was found. For now, download all outputs locally; in the future we could
      // just update the TreeNodeRepository and continue the build.
      try {
        actionCache.downloadAllResults(result, execRoot);
        return;
      } catch (CacheNotFoundException e) {
        // Retry the action remotely and invalidate the results.
        acceptCachedResult = false;
      }
    }
    // Upload the command and all the inputs into the remote cache.
    actionCache.uploadBlob(command.toByteArray());
    // TODO(olaola): this should use the ActionInputFileCache for SHA1 digests!
    actionCache.uploadTree(repository, execRoot, inputRoot);
    // TODO(olaola): set BuildInfo and input total bytes as well.
    ExecuteRequest.Builder request =
        ExecuteRequest.newBuilder()
            .setAction(action)
            .setAcceptCached(acceptCachedResult)
            .setTotalInputFileCount(inputs.size())
            .setTimeoutMillis(1000 * Spawns.getTimeoutSeconds(spawn, 120));
    // TODO(olaola): set sensible local and remote timeouts.
    ExecuteReply reply = workExecutor.executeRemotely(request.build());
    ExecutionStatus status = reply.getStatus();
    result = reply.getResult();
    // Pass on the remote stdout and stderr only if we are not going to retry the action.
    if (status.getSucceeded()) {
      passRemoteOutErr(actionCache, result, actionExecutionContext.getFileOutErr());
      actionCache.downloadAllResults(result, execRoot);
      return;
    }
    if (status.getError() == ExecutionStatus.ErrorCode.EXEC_FAILED
        || !options.remoteAllowLocalFallback) {
      passRemoteOutErr(actionCache, result, actionExecutionContext.getFileOutErr());
      throw new UserExecException(status.getErrorDetail());
    }
    // For now, we retry locally on all other remote errors.
    // TODO(olaola): add remote retries on cache miss errors.
    execLocally(spawn, actionExecutionContext, actionCache, actionKey);
  } catch (IOException e) {
    throw new UserExecException("Unexpected IO error.", e);
  } catch (InterruptedException e) {
    eventHandler.handle(Event.warn(mnemonic + " remote work interrupted (" + e + ")"));
    Thread.currentThread().interrupt();
    throw e;
  } catch (StatusRuntimeException e) {
    String stackTrace = "";
    if (verboseFailures) {
      stackTrace = "\n" + Throwables.getStackTraceAsString(e);
    }
    eventHandler.handle(Event.warn(mnemonic + " remote work failed (" + e + ")" + stackTrace));
    if (options.remoteAllowLocalFallback) {
      execLocally(spawn, actionExecutionContext, actionCache, actionKey);
    } else {
      throw new UserExecException(e);
    }
  } catch (CacheNotFoundException e) {
    eventHandler.handle(Event.warn(mnemonic + " remote work results cache miss (" + e + ")"));
    if (options.remoteAllowLocalFallback) {
      execLocally(spawn, actionExecutionContext, actionCache, actionKey);
    } else {
      throw new UserExecException(e);
    }
  } catch (UnsupportedOperationException e) {
    eventHandler.handle(Event.warn(mnemonic + " unsupported operation for action cache (" + e + ")"));
  }
}
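The catch blocks above repeat one pattern: report the remote problem as a warning through the EventHandler, then either fall back to local execution or rethrow. The sketch below factors that pattern out for illustration; the helper class, method name, and message format are hypothetical, not Bazel API.

// Illustrative sketch (hypothetical names): the recurring "warn, then decide whether to
// fall back locally" pattern used by RemoteSpawnStrategy above.
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.events.EventHandler;

final class RemoteFallbacks {
  private RemoteFallbacks() {}

  // Emits a warning through the given handler and reports whether local fallback is allowed.
  static boolean warnAndCheckFallback(
      EventHandler eventHandler,
      String mnemonic,
      String what,
      Throwable cause,
      boolean allowLocalFallback) {
    eventHandler.handle(Event.warn(mnemonic + " " + what + " (" + cause + ")"));
    return allowLocalFallback;
  }
}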
Use of com.google.devtools.build.lib.events.EventHandler in project bazel by bazelbuild.
Class BlazeCommandDispatcher, method execExclusively:
private int execExclusively(
    List<String> args,
    OutErr outErr,
    long firstContactTime,
    String commandName,
    BlazeCommand command,
    long waitTimeInMs)
    throws ShutdownBlazeServerException {
  Command commandAnnotation = command.getClass().getAnnotation(Command.class);
  // Record the start time for the profiler. Do not put anything before this!
  long execStartTimeNanos = runtime.getClock().nanoTime();
  // The initCommand call also records the start time for the timestamp granularity monitor.
  CommandEnvironment env = runtime.getWorkspace().initCommand();
  // Record the command's starting time for use by the commands themselves.
  env.recordCommandStartTime(firstContactTime);
  AbruptExitException exitCausingException = null;
  for (BlazeModule module : runtime.getBlazeModules()) {
    try {
      module.beforeCommand(commandAnnotation, env);
    } catch (AbruptExitException e) {
      // Don't let one module's complaints prevent the other modules from doing necessary
      // setup. We promised to call beforeCommand exactly once per module before each command
      // and will be calling afterCommand soon in the future - a module's afterCommand might
      // rightfully assume its beforeCommand has already been called.
      outErr.printErrLn(e.getMessage());
      // It's not ideal but we can only return one exit code, so we just pick the code of the
      // last exception.
      exitCausingException = e;
    }
  }
  if (exitCausingException != null) {
    return exitCausingException.getExitCode().getNumericExitCode();
  }
  try {
    Path commandLog = getCommandLogPath(env.getOutputBase());
    // Unlink old command log from previous build, if present, so scripts
    // reading it don't conflate it with the command log we're about to write.
    closeSilently(logOutputStream);
    logOutputStream = null;
    commandLog.delete();
    if (env.getRuntime().writeCommandLog() && commandAnnotation.writeCommandLog()) {
      logOutputStream = commandLog.getOutputStream();
      outErr = tee(outErr, OutErr.create(logOutputStream, logOutputStream));
    }
  } catch (IOException ioException) {
    LoggingUtil.logToRemote(Level.WARNING, "Unable to delete or open command.log", ioException);
  }
  ExitCode result = checkCwdInWorkspace(env, commandAnnotation, commandName, outErr);
  if (!result.equals(ExitCode.SUCCESS)) {
    return result.getNumericExitCode();
  }
  OptionsParser optionsParser;
  // Delay output of notes regarding the parsed rc file, so it's possible to disable this in the
  // rc file.
  List<String> rcfileNotes = new ArrayList<>();
  try {
    optionsParser = createOptionsParser(command);
    parseArgsAndConfigs(env, optionsParser, commandAnnotation, args, rcfileNotes, outErr);
    InvocationPolicyEnforcer optionsPolicyEnforcer =
        new InvocationPolicyEnforcer(runtime.getInvocationPolicy());
    optionsPolicyEnforcer.enforce(optionsParser, commandName);
    optionsPolicyEnforcer =
        InvocationPolicyEnforcer.create(
            getRuntime()
                .getStartupOptionsProvider()
                .getOptions(BlazeServerStartupOptions.class)
                .invocationPolicy);
    optionsPolicyEnforcer.enforce(optionsParser, commandName);
  } catch (OptionsParsingException e) {
    for (String note : rcfileNotes) {
      outErr.printErrLn("INFO: " + note);
    }
    outErr.printErrLn(e.getMessage());
    return ExitCode.COMMAND_LINE_ERROR.getNumericExitCode();
  }
  // Set up log filtering.
  BlazeCommandEventHandler.Options eventHandlerOptions =
      optionsParser.getOptions(BlazeCommandEventHandler.Options.class);
  OutErr colorfulOutErr = outErr;
  if (!eventHandlerOptions.useColor()) {
    outErr = ansiStripOut(ansiStripErr(outErr));
    if (!commandAnnotation.binaryStdOut()) {
      colorfulOutErr = ansiStripOut(colorfulOutErr);
    }
    if (!commandAnnotation.binaryStdErr()) {
      colorfulOutErr = ansiStripErr(colorfulOutErr);
    }
  }
  if (!commandAnnotation.binaryStdOut()) {
    outErr = lineBufferOut(outErr);
  }
  if (!commandAnnotation.binaryStdErr()) {
    outErr = lineBufferErr(outErr);
  }
  CommonCommandOptions commonOptions = optionsParser.getOptions(CommonCommandOptions.class);
  if (!commonOptions.verbosity.equals(lastLogVerbosityLevel)) {
    BlazeRuntime.setupLogging(commonOptions.verbosity);
    lastLogVerbosityLevel = commonOptions.verbosity;
  }
  // Do this before an actual crash so we don't have to worry about
  // allocating memory post-crash.
  String[] crashData = env.getCrashData();
  int numericExitCode = ExitCode.BLAZE_INTERNAL_ERROR.getNumericExitCode();
  PrintStream savedOut = System.out;
  PrintStream savedErr = System.err;
  EventHandler handler = createEventHandler(outErr, eventHandlerOptions);
  Reporter reporter = env.getReporter();
  reporter.addHandler(handler);
  env.getEventBus().register(handler);
  // We register an ANSI-allowing handler associated with {@code handler} so that ANSI control
  // codes can be re-introduced later even if blaze is invoked with --color=no. This is useful
  // for commands such as 'blaze run' where the output of the final executable shouldn't be
  // modified.
  EventHandler ansiAllowingHandler = null;
  if (!eventHandlerOptions.useColor()) {
    ansiAllowingHandler = createEventHandler(colorfulOutErr, eventHandlerOptions);
    reporter.registerAnsiAllowingHandler(handler, ansiAllowingHandler);
    if (ansiAllowingHandler instanceof ExperimentalEventHandler) {
      env.getEventBus()
          .register(
              new PassiveExperimentalEventHandler((ExperimentalEventHandler) ansiAllowingHandler));
    }
  }
  try {
    // While a Blaze command is active, direct all errors to the client's
    // event handler (and out/err streams).
    OutErr reporterOutErr = reporter.getOutErr();
    System.setOut(new PrintStream(reporterOutErr.getOutputStream(), /*autoflush=*/ true));
    System.setErr(new PrintStream(reporterOutErr.getErrorStream(), /*autoflush=*/ true));
    for (BlazeModule module : runtime.getBlazeModules()) {
      module.checkEnvironment(env);
    }
    if (commonOptions.announceRcOptions) {
      for (String note : rcfileNotes) {
        reporter.handle(Event.info(note));
      }
    }
    try {
      // Notify the BlazeRuntime, so it can do some initial setup.
      env.beforeCommand(
          commandAnnotation, optionsParser, commonOptions, execStartTimeNanos, waitTimeInMs);
      // Allow the command to edit options after parsing:
      command.editOptions(env, optionsParser);
    } catch (AbruptExitException e) {
      reporter.handle(Event.error(e.getMessage()));
      return e.getExitCode().getNumericExitCode();
    }
    for (BlazeModule module : runtime.getBlazeModules()) {
      module.handleOptions(optionsParser);
    }
    // Print warnings for odd options usage.
    for (String warning : optionsParser.getWarnings()) {
      reporter.handle(Event.warn(warning));
    }
    ExitCode outcome = command.exec(env, optionsParser);
    outcome = env.precompleteCommand(outcome);
    numericExitCode = outcome.getNumericExitCode();
    return numericExitCode;
  } catch (ShutdownBlazeServerException e) {
    numericExitCode = e.getExitStatus();
    throw e;
  } catch (Throwable e) {
    e.printStackTrace();
    BugReport.printBug(outErr, e);
    BugReport.sendBugReport(e, args, crashData);
    numericExitCode = BugReport.getExitCodeForThrowable(e);
    throw new ShutdownBlazeServerException(numericExitCode, e);
  } finally {
    env.getEventBus().post(new AfterCommandEvent());
    runtime.afterCommand(env, numericExitCode);
    // Swallow IOException, as we are already in a finally clause.
    Flushables.flushQuietly(outErr.getOutputStream());
    Flushables.flushQuietly(outErr.getErrorStream());
    System.setOut(savedOut);
    System.setErr(savedErr);
    reporter.removeHandler(handler);
    releaseHandler(handler);
    if (!eventHandlerOptions.useColor()) {
      reporter.removeHandler(ansiAllowingHandler);
      releaseHandler(ansiAllowingHandler);
    }
    env.getTimestampGranularityMonitor().waitForTimestampGranularity(outErr);
  }
}
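The dispatcher's EventHandler lifecycle is the interesting part here: a handler is created per command, attached to the Reporter (and EventBus) before the command runs, and always detached in the finally block. The sketch below mirrors that attach/run/detach structure using only the EventHandler interface and the Reporter.addHandler/removeHandler calls visible above; the example class, its method, and the console-printing handler body are assumptions for illustration.

// Minimal sketch (hypothetical class and method names): attach a handler for the duration
// of a command and always detach it, mirroring execExclusively's try/finally structure.
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.events.EventHandler;
import com.google.devtools.build.lib.events.Reporter;

final class ScopedHandlerExample {
  private ScopedHandlerExample() {}

  static void runWithHandler(Reporter reporter, Runnable command) {
    EventHandler handler =
        new EventHandler() {
          @Override
          public void handle(Event event) {
            // A trivial sink: print the event kind and message to stderr.
            System.err.println(event.getKind() + ": " + event.getMessage());
          }
        };
    reporter.addHandler(handler);
    try {
      command.run();
    } finally {
      // Detach even if the command throws, so later commands do not inherit this handler.
      reporter.removeHandler(handler);
    }
  }
}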
Use of com.google.devtools.build.lib.events.EventHandler in project bazel by bazelbuild.
Class SandboxStrategy, method runSpawn:
protected void runSpawn(
    Spawn spawn,
    ActionExecutionContext actionExecutionContext,
    Map<String, String> spawnEnvironment,
    SandboxExecRoot sandboxExecRoot,
    Set<PathFragment> outputs,
    SandboxRunner runner,
    AtomicReference<Class<? extends SpawnActionContext>> writeOutputFiles)
    throws ExecException, InterruptedException {
  EventHandler eventHandler = actionExecutionContext.getExecutor().getEventHandler();
  ExecException execException = null;
  OutErr outErr = actionExecutionContext.getFileOutErr();
  try {
    runner.run(
        spawn.getArguments(),
        spawnEnvironment,
        outErr,
        Spawns.getTimeoutSeconds(spawn),
        SandboxHelpers.shouldAllowNetwork(buildRequest, spawn),
        sandboxOptions.sandboxDebug,
        sandboxOptions.sandboxFakeHostname);
  } catch (ExecException e) {
    execException = e;
  }
  if (writeOutputFiles != null && !writeOutputFiles.compareAndSet(null, SandboxStrategy.class)) {
    throw new InterruptedException();
  }
  try {
    // We copy the outputs even when the command failed, otherwise StandaloneTestStrategy
    // won't be able to get the test logs of a failed test. (We should probably do this in
    // some better way.)
    sandboxExecRoot.copyOutputs(execRoot, outputs);
  } catch (IOException e) {
    if (execException == null) {
      throw new UserExecException("Could not move output artifacts from sandboxed execution", e);
    } else {
      // Catch the IOException and turn it into an error message, otherwise this might hide an
      // exception thrown during runner.run earlier.
      eventHandler.handle(
          Event.error(
              "I/O exception while extracting output artifacts from sandboxed execution: " + e));
    }
  }
  if (execException != null) {
    outErr.printErr(
        "Use --strategy="
            + spawn.getMnemonic()
            + "=standalone to disable sandboxing for the failing actions.\n");
    throw execException;
  }
}
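Note how the EventHandler is used only when an ExecException has already been captured: the secondary I/O failure is reported as an error event instead of being thrown, so it cannot mask the primary failure. A small hedged sketch of that choice follows; the helper class and method name are made up for illustration.

// Hedged sketch (hypothetical helper): report a secondary I/O failure as an error event
// rather than throwing, so it cannot hide an ExecException that was already captured.
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.events.EventHandler;
import java.io.IOException;

final class SandboxErrors {
  private SandboxErrors() {}

  static void reportSecondaryFailure(EventHandler eventHandler, IOException e) {
    eventHandler.handle(
        Event.error(
            "I/O exception while extracting output artifacts from sandboxed execution: " + e));
  }
}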
Use of com.google.devtools.build.lib.events.EventHandler in project bazel by bazelbuild.
Class PrepareDepsOfPatternsFunction, method compute:
/**
 * Given a {@link SkyKey} that contains a sequence of target patterns, when this function returns
 * {@link PrepareDepsOfPatternsValue}, then all targets matching that sequence, and those targets'
 * transitive dependencies, have been loaded.
 */
@Nullable
@Override
public SkyValue compute(SkyKey skyKey, Environment env) throws InterruptedException {
  EventHandler eventHandler = env.getListener();
  ImmutableList<SkyKey> skyKeys = getSkyKeys(skyKey, eventHandler);
  Map<SkyKey, ValueOrException<TargetParsingException>> tokensByKey =
      env.getValuesOrThrow(skyKeys, TargetParsingException.class);
  if (env.valuesMissing()) {
    return null;
  }
  boolean handlerIsParseFailureListener = eventHandler instanceof ParseFailureListener;
  for (SkyKey key : skyKeys) {
    try {
      // The only exception type throwable by PrepareDepsOfPatternFunction is
      // TargetParsingException. Therefore all ValueOrException values in the map will either
      // be non-null or throw TargetParsingException when get is called.
      Preconditions.checkNotNull(tokensByKey.get(key).get());
    } catch (TargetParsingException e) {
      // If a target pattern can't be evaluated, notify the user of the problem and keep going.
      handleTargetParsingException(eventHandler, handlerIsParseFailureListener, key, e);
    }
  }
  return new PrepareDepsOfPatternsValue(getTargetPatternKeys(skyKeys));
}
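Inside a SkyFunction, the EventHandler comes from env.getListener(), and the snippet uses it to report a bad target pattern while letting evaluation continue. The implementation of handleTargetParsingException is not shown above; the sketch below is an illustrative stand-in showing only the generic EventHandler path (the class, method name, and message wording are assumptions).

// Illustrative only (not the real handleTargetParsingException): report a target-pattern
// failure through the SkyFunction environment's EventHandler and keep going.
import com.google.devtools.build.lib.cmdline.TargetParsingException;
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.events.EventHandler;

final class PatternErrorReporting {
  private PatternErrorReporting() {}

  static void reportAndContinue(EventHandler eventHandler, String pattern, TargetParsingException e) {
    eventHandler.handle(Event.error("Skipping '" + pattern + "': " + e.getMessage()));
  }
}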