Use of com.google.devtools.build.lib.actions.ActionInput in the bazelbuild/bazel project.
Example: the exec method of the RemoteSpawnStrategy class.
/**
 * Executes the given {@code spawn}, preferring remote execution when the spawn is remotable
 * and a remote action cache is configured; otherwise delegates to the standalone strategy.
 *
 * <p>On a cache hit the cached outputs are downloaded instead of running the action. On a
 * remote failure, execution may fall back to local execution when
 * {@code options.remoteAllowLocalFallback} is set.
 *
 * @param spawn the spawn to execute
 * @param actionExecutionContext execution services (executor, event handler, out/err)
 * @throws ExecException if execution fails and no local fallback is taken
 * @throws InterruptedException if remote work is interrupted
 */
@Override
public void exec(Spawn spawn, ActionExecutionContext actionExecutionContext) throws ExecException, InterruptedException {
// Filled in only after the Action proto is built; used as the cache key for uploads.
ActionKey actionKey = null;
String mnemonic = spawn.getMnemonic();
Executor executor = actionExecutionContext.getExecutor();
EventHandler eventHandler = executor.getEventHandler();
RemoteActionCache actionCache = null;
RemoteWorkExecutor workExecutor = null;
if (spawn.isRemotable()) {
// action to enable server-side parallelism (need a different gRPC channel per action).
try {
// Pick the cache implementation from the options: ConcurrentMap-backed or gRPC-backed.
if (ConcurrentMapFactory.isRemoteCacheOptions(options)) {
actionCache = new ConcurrentMapActionCache(ConcurrentMapFactory.create(options));
} else if (GrpcActionCache.isRemoteCacheOptions(options)) {
actionCache = new GrpcActionCache(options);
}
// A remote executor is only set up when a cache exists to stage inputs/outputs through.
if (actionCache != null && RemoteWorkExecutor.isRemoteExecutionOptions(options)) {
workExecutor = new RemoteWorkExecutor(options);
}
} catch (InvalidConfigurationException e) {
// A bad remote configuration only warns; actionCache stays null, so we fall back below.
eventHandler.handle(Event.warn(e.toString()));
}
}
// Not remotable, or no cache configured: run with the standalone (local) strategy.
if (!spawn.isRemotable() || actionCache == null) {
standaloneStrategy.exec(spawn, actionExecutionContext);
return;
}
// Cache available but no remote executor: run locally, uploading results to the cache.
if (workExecutor == null) {
execLocally(spawn, actionExecutionContext, actionCache, actionKey);
return;
}
if (executor.reportsSubcommands()) {
executor.reportSubcommand(spawn);
}
executor.getEventBus().post(ActionStatusMessage.runningStrategy(spawn.getResourceOwner(), "remote"));
try {
// Temporary hack: the TreeNodeRepository should be created and maintained upstream!
TreeNodeRepository repository = new TreeNodeRepository(execRoot);
List<ActionInput> inputs = ActionInputHelper.expandArtifacts(spawn.getInputFiles(), actionExecutionContext.getArtifactExpander());
TreeNode inputRoot = repository.buildFromActionInputs(inputs);
repository.computeMerkleDigests(inputRoot);
Command command = buildCommand(spawn.getArguments(), spawn.getEnvironment());
Action action = buildAction(spawn.getOutputFiles(), ContentDigests.computeDigest(command), repository.getMerkleDigest(inputRoot));
// Look up action cache, and reuse the action output if it is found.
actionKey = ContentDigests.computeActionKey(action);
// Only consult the cache when the user allows cached results.
ActionResult result = this.options.remoteAcceptCached ? actionCache.getCachedActionResult(actionKey) : null;
boolean acceptCachedResult = this.options.remoteAcceptCached;
if (result != null) {
// just update the TreeNodeRepository and continue the build.
try {
actionCache.downloadAllResults(result, execRoot);
return;
} catch (CacheNotFoundException e) {
// Retry the action remotely and invalidate the results.
acceptCachedResult = false;
}
}
// Upload the command and all the inputs into the remote cache.
actionCache.uploadBlob(command.toByteArray());
// TODO(olaola): this should use the ActionInputFileCache for SHA1 digests!
actionCache.uploadTree(repository, execRoot, inputRoot);
// TODO(olaola): set BuildInfo and input total bytes as well.
ExecuteRequest.Builder request = ExecuteRequest.newBuilder().setAction(action).setAcceptCached(acceptCachedResult).setTotalInputFileCount(inputs.size()).setTimeoutMillis(1000 * Spawns.getTimeoutSeconds(spawn, 120));
// TODO(olaola): set sensible local and remote timouts.
ExecuteReply reply = workExecutor.executeRemotely(request.build());
ExecutionStatus status = reply.getStatus();
result = reply.getResult();
// action.
if (status.getSucceeded()) {
// Remote success: relay stdout/stderr and pull the outputs into the exec root.
passRemoteOutErr(actionCache, result, actionExecutionContext.getFileOutErr());
actionCache.downloadAllResults(result, execRoot);
return;
}
// An EXEC_FAILED status, or a disabled local fallback, surfaces the remote error to the user.
if (status.getError() == ExecutionStatus.ErrorCode.EXEC_FAILED || !options.remoteAllowLocalFallback) {
passRemoteOutErr(actionCache, result, actionExecutionContext.getFileOutErr());
throw new UserExecException(status.getErrorDetail());
}
// For now, we retry locally on all other remote errors.
// TODO(olaola): add remote retries on cache miss errors.
execLocally(spawn, actionExecutionContext, actionCache, actionKey);
} catch (IOException e) {
throw new UserExecException("Unexpected IO error.", e);
} catch (InterruptedException e) {
// Restore the interrupt flag before propagating so callers see the interruption.
eventHandler.handle(Event.warn(mnemonic + " remote work interrupted (" + e + ")"));
Thread.currentThread().interrupt();
throw e;
} catch (StatusRuntimeException e) {
// gRPC-level failure: warn (with a stack trace when verbose), then fall back or fail.
String stackTrace = "";
if (verboseFailures) {
stackTrace = "\n" + Throwables.getStackTraceAsString(e);
}
eventHandler.handle(Event.warn(mnemonic + " remote work failed (" + e + ")" + stackTrace));
if (options.remoteAllowLocalFallback) {
execLocally(spawn, actionExecutionContext, actionCache, actionKey);
} else {
throw new UserExecException(e);
}
} catch (CacheNotFoundException e) {
// The remote result referenced blobs no longer in the cache; rerun locally if allowed.
eventHandler.handle(Event.warn(mnemonic + " remote work results cache miss (" + e + ")"));
if (options.remoteAllowLocalFallback) {
execLocally(spawn, actionExecutionContext, actionCache, actionKey);
} else {
throw new UserExecException(e);
}
} catch (UnsupportedOperationException e) {
// NOTE(review): this path only warns; the method returns without any local fallback
// execution — confirm that is intended for caches lacking an operation.
eventHandler.handle(Event.warn(mnemonic + " unsupported operation for action cache (" + e + ")"));
}
}
Use of com.google.devtools.build.lib.actions.ActionInput in the bazelbuild/bazel project.
Example: the execLocally method of the RemoteSpawnStrategy class.
/**
 * Runs the spawn with the standalone (local) strategy. When both a cache and an
 * {@link ActionKey} are supplied and result uploading is enabled, the local outputs are
 * pushed to the remote action cache afterwards; cache failures never fail the build
 * (except unexpected I/O errors).
 *
 * @param spawn the spawn to execute locally
 * @param actionExecutionContext execution services for the standalone strategy
 * @param actionCache remote cache to publish results to, or null to skip uploading
 * @param actionKey cache key for the results, or null to skip uploading
 * @throws ExecException if the local execution itself fails
 * @throws InterruptedException if the local execution is interrupted
 */
private void execLocally(Spawn spawn, ActionExecutionContext actionExecutionContext, RemoteActionCache actionCache, ActionKey actionKey) throws ExecException, InterruptedException {
  standaloneStrategy.exec(spawn, actionExecutionContext);
  // Uploading is best-effort and entirely optional: bail out unless it is both
  // requested and possible.
  if (!options.remoteLocalExecUploadResults || actionCache == null || actionKey == null) {
    return;
  }
  // Resolve every declared output to an absolute path under the execution root.
  ArrayList<Path> outputPaths = new ArrayList<>();
  for (ActionInput output : spawn.getOutputFiles()) {
    outputPaths.add(execRoot.getRelative(output.getExecPathString()));
  }
  try {
    ActionResult.Builder resultBuilder = ActionResult.newBuilder();
    actionCache.uploadAllResults(execRoot, outputPaths, resultBuilder);
    actionCache.setCachedActionResult(actionKey, resultBuilder.build());
    // Handle all cache errors here.
  } catch (IOException e) {
    throw new UserExecException("Unexpected IO error.", e);
  } catch (UnsupportedOperationException e) {
    // The configured cache does not support this operation; warn and move on.
    actionExecutionContext.getExecutor().getEventHandler().handle(Event.warn(spawn.getMnemonic() + " unsupported operation for action cache (" + e + ")"));
  } catch (StatusRuntimeException e) {
    // A gRPC error while uploading is non-fatal; the local result already exists.
    actionExecutionContext.getExecutor().getEventHandler().handle(Event.warn(spawn.getMnemonic() + " failed uploading results (" + e + ")"));
  }
}
Use of com.google.devtools.build.lib.actions.ActionInput in the bazelbuild/bazel project.
Example: the mountInputs method of the SpawnHelpers class.
/**
 * Registers a mount entry for every input of the spawn, mapping each input's exec path to
 * its location under the execution root.
 *
 * @param mounts destination map from mount point to real path; entries are added in place
 * @param spawn the spawn whose inputs are mounted
 * @param actionExecutionContext supplies the artifact expander for tree artifacts
 */
void mountInputs(Map<PathFragment, Path> mounts, Spawn spawn, ActionExecutionContext actionExecutionContext) {
  List<ActionInput> expandedInputs = ActionInputHelper.expandArtifacts(spawn.getInputFiles(), actionExecutionContext.getArtifactExpander());
  // Tree artifacts that expand to no files vanish from the expanded list, yet still need
  // a mount point, so scan the raw (unexpanded) inputs first and mount the empty ones
  // as directories.
  for (ActionInput rawInput : spawn.getInputFiles()) {
    if (!(rawInput instanceof Artifact) || !((Artifact) rawInput).isTreeArtifact()) {
      continue;
    }
    List<Artifact> contained = new ArrayList<>();
    actionExecutionContext.getArtifactExpander().expand((Artifact) rawInput, contained);
    if (contained.isEmpty()) {
      PathFragment mountPoint = new PathFragment(rawInput.getExecPathString());
      mounts.put(mountPoint, execRoot.getRelative(mountPoint));
    }
  }
  // Mount every expanded input file, skipping middleman artifacts.
  for (ActionInput input : expandedInputs) {
    if (input.getExecPathString().contains("internal/_middlemen/")) {
      continue;
    }
    PathFragment mountPoint = new PathFragment(input.getExecPathString());
    mounts.put(mountPoint, execRoot.getRelative(mountPoint));
  }
}
Use of com.google.devtools.build.lib.actions.ActionInput in the bazelbuild/bazel project.
Example: the testMerkleDigests method of the TreeNodeRepositoryTest class.
// Builds a two-file tree (a/foo, a/bar), computes Merkle digests over it, and verifies
// that the repository can round-trip from digests back to the original ActionInputs and
// FileNodes. Child ordering throughout follows lexicographic sort: "bar" < "foo".
@Test
public void testMerkleDigests() throws Exception {
Artifact foo = new Artifact(scratch.file("/exec/root/a/foo", "1"), rootDir);
Artifact bar = new Artifact(scratch.file("/exec/root/a/bar", "11"), rootDir);
TreeNodeRepository repo = new TreeNodeRepository(rootDir.getPath());
TreeNode root = repo.buildFromActionInputs(ImmutableList.<ActionInput>of(foo, bar));
// Both files live under the single directory "a", the root's only child.
TreeNode aNode = root.getChildEntries().get(0).getChild();
// foo > bar in sort order!
TreeNode fooNode = aNode.getChildEntries().get(1).getChild();
TreeNode barNode = aNode.getChildEntries().get(0).getChild();
// Digests must be computed before any getMerkleDigest/getAllDigests call.
repo.computeMerkleDigests(root);
ImmutableCollection<ContentDigest> digests = repo.getAllDigests(root);
ContentDigest rootDigest = repo.getMerkleDigest(root);
ContentDigest aDigest = repo.getMerkleDigest(aNode);
ContentDigest fooDigest = repo.getMerkleDigest(fooNode);
ContentDigest fooContentsDigest = ContentDigests.computeDigest(foo.getPath());
ContentDigest barDigest = repo.getMerkleDigest(barNode);
ContentDigest barContentsDigest = ContentDigests.computeDigest(bar.getPath());
// The full digest set: one Merkle digest per tree node plus one content digest per file.
assertThat(digests).containsExactly(rootDigest, aDigest, barDigest, barContentsDigest, fooDigest, fooContentsDigest);
ArrayList<FileNode> fileNodes = new ArrayList<>();
ArrayList<ActionInput> actionInputs = new ArrayList<>();
// Reverse lookup: resolve the digests back into FileNode protos and ActionInputs.
repo.getDataFromDigests(digests, actionInputs, fileNodes);
assertThat(actionInputs).containsExactly(bar, foo);
// Four FileNodes: root, "a", and the two leaves. The indices below assume the order
// root, a, bar, foo — presumably matching getAllDigests traversal order.
assertThat(fileNodes).hasSize(4);
FileNode rootFileNode = fileNodes.get(0);
assertThat(rootFileNode.getChild(0).getPath()).isEqualTo("a");
assertThat(rootFileNode.getChild(0).getDigest()).isEqualTo(aDigest);
FileNode aFileNode = fileNodes.get(1);
assertThat(aFileNode.getChild(0).getPath()).isEqualTo("bar");
assertThat(aFileNode.getChild(0).getDigest()).isEqualTo(barDigest);
assertThat(aFileNode.getChild(1).getPath()).isEqualTo("foo");
assertThat(aFileNode.getChild(1).getDigest()).isEqualTo(fooDigest);
// Leaf FileNodes carry the raw file content digests.
FileNode barFileNode = fileNodes.get(2);
assertThat(barFileNode.getFileMetadata().getDigest()).isEqualTo(barContentsDigest);
FileNode fooFileNode = fileNodes.get(3);
assertThat(fooFileNode.getFileMetadata().getDigest()).isEqualTo(fooContentsDigest);
}
Aggregations