Use of build.bazel.remote.execution.v2.ExecutionPolicy in project bazel-buildfarm by bazelbuild.
From class ShardWorkerContext, method getMatchProvisions.
static SetMultimap<String, String> getMatchProvisions(
    Platform platform, Iterable<ExecutionPolicy> policies, int executeStageWidth) {
  ImmutableSetMultimap.Builder<String, String> provisions = ImmutableSetMultimap.builder();
  Platform matchPlatform = ExecutionPolicies.getMatchPlatform(platform, policies);
  for (Platform.Property property : matchPlatform.getPropertiesList()) {
    provisions.put(property.getName(), property.getValue());
  }
  provisions.put(PROVISION_CORES_NAME, String.format("%d", executeStageWidth));
  return provisions.build();
}
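To illustrate, here is a minimal, self-contained sketch of what getMatchProvisions produces for a small platform. The property names, the empty policy list, and the stage width of 8 are illustrative assumptions, the call assumes code in the same package as ShardWorkerContext, and the concrete value of PROVISION_CORES_NAME is not shown in the snippet above.

import build.bazel.remote.execution.v2.Platform;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.SetMultimap;

class MatchProvisionsSketch {
  public static void main(String[] args) {
    // A platform with two properties, roughly as a worker might be configured.
    Platform platform =
        Platform.newBuilder()
            .addProperties(Platform.Property.newBuilder().setName("min-cores").setValue("1"))
            .addProperties(Platform.Property.newBuilder().setName("max-cores").setValue("4"))
            .build();
    // Assuming getMatchPlatform passes the platform properties through when no
    // policies apply, the result maps each property name to its value and adds
    // the cores provision (PROVISION_CORES_NAME) with the value "8".
    SetMultimap<String, String> provisions =
        ShardWorkerContext.getMatchProvisions(platform, ImmutableList.of(), 8);
    System.out.println(provisions);
  }
}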
Use of build.bazel.remote.execution.v2.ExecutionPolicy in project bazel-buildfarm by bazelbuild.
From class Executor, method runInterruptible.
private long runInterruptible(Stopwatch stopwatch, ResourceLimits limits)
    throws InterruptedException {
  long startedAt = System.currentTimeMillis();
  ExecuteOperationMetadata metadata;
  try {
    metadata = operationContext.operation.getMetadata().unpack(ExecuteOperationMetadata.class);
  } catch (InvalidProtocolBufferException e) {
    logger.log(Level.SEVERE, "invalid execute operation metadata", e);
    return 0;
  }
  ExecuteOperationMetadata executingMetadata =
      metadata.toBuilder().setStage(ExecutionStage.Value.EXECUTING).build();
  Iterable<ExecutionPolicy> policies = new ArrayList<>();
  if (limits.useExecutionPolicies) {
    policies =
        ExecutionPolicies.forPlatform(
            operationContext.command.getPlatform(), workerContext::getExecutionPolicies);
  }
  Operation operation =
      operationContext.operation.toBuilder()
          .setMetadata(
              Any.pack(
                  ExecutingOperationMetadata.newBuilder()
                      .setStartedAt(startedAt)
                      .setExecutingOn(workerContext.getName())
                      .setExecuteOperationMetadata(executingMetadata)
                      .setRequestMetadata(
                          operationContext.queueEntry.getExecuteEntry().getRequestMetadata())
                      .build()))
          .build();
  boolean operationUpdateSuccess = false;
  try {
    operationUpdateSuccess = workerContext.putOperation(operation);
  } catch (IOException e) {
    logger.log(
        Level.SEVERE, format("error putting operation %s as EXECUTING", operation.getName()), e);
  }
  if (!operationUpdateSuccess) {
    logger.log(
        Level.WARNING,
        String.format("Executor::run(%s): could not transition to EXECUTING", operation.getName()));
    putError();
    return 0;
  }
  // settings for deciding timeout
  TimeoutSettings timeoutSettings = new TimeoutSettings();
  timeoutSettings.defaultTimeout = workerContext.getDefaultActionTimeout();
  timeoutSettings.maxTimeout = workerContext.getMaximumActionTimeout();
  // decide timeout and begin deadline
  Duration timeout = decideTimeout(timeoutSettings, operationContext.action);
  Deadline pollDeadline = Time.toDeadline(timeout);
  workerContext.resumePoller(
      operationContext.poller,
      "Executor",
      operationContext.queueEntry,
      ExecutionStage.Value.EXECUTING,
      Thread.currentThread()::interrupt,
      pollDeadline);
  try {
    return executePolled(operation, limits, policies, timeout, stopwatch);
  } finally {
    operationContext.poller.pause();
  }
}
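The stage transition above hinges on protobuf's Any wrapper: the queued metadata is unpacked from the operation, its stage is flipped to EXECUTING, and the result is repacked into the operation. A minimal standalone sketch of that pack/unpack round trip (the QUEUED starting stage is an illustrative assumption):

import build.bazel.remote.execution.v2.ExecuteOperationMetadata;
import build.bazel.remote.execution.v2.ExecutionStage;
import com.google.protobuf.Any;
import com.google.protobuf.InvalidProtocolBufferException;

class AnyRoundTripSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    ExecuteOperationMetadata queued =
        ExecuteOperationMetadata.newBuilder().setStage(ExecutionStage.Value.QUEUED).build();
    // Pack into Any, the form in which Operation.metadata stores it.
    Any packed = Any.pack(queued);
    // Unpack and move the stage to EXECUTING, mirroring runInterruptible.
    ExecuteOperationMetadata executing =
        packed.unpack(ExecuteOperationMetadata.class).toBuilder()
            .setStage(ExecutionStage.Value.EXECUTING)
            .build();
    System.out.println(executing.getStage());
  }
}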
Use of build.bazel.remote.execution.v2.ExecutionPolicy in project bazel-buildfarm by bazelbuild.
From class Executor, method executePolled.
private long executePolled(
    Operation operation,
    ResourceLimits limits,
    Iterable<ExecutionPolicy> policies,
    Duration timeout,
    Stopwatch stopwatch)
    throws InterruptedException {
  /* execute command */
  logger.log(Level.FINE, "Executor: Operation " + operation.getName() + " Executing command");
  ActionResult.Builder resultBuilder = operationContext.executeResponse.getResultBuilder();
  resultBuilder
      .getExecutionMetadataBuilder()
      .setExecutionStartTimestamp(Timestamps.fromMillis(System.currentTimeMillis()));
  Command command = operationContext.command;
  Path workingDirectory = operationContext.execDir;
  if (!command.getWorkingDirectory().isEmpty()) {
    workingDirectory = workingDirectory.resolve(command.getWorkingDirectory());
  }
  String operationName = operation.getName();
  ImmutableList.Builder<String> arguments = ImmutableList.builder();
  Code statusCode;
  try (IOResource resource =
      workerContext.limitExecution(
          operationName, arguments, operationContext.command, workingDirectory)) {
    for (ExecutionPolicy policy : policies) {
      if (policy.getPolicyCase() == WRAPPER) {
        arguments.addAll(transformWrapper(policy.getWrapper()));
      }
    }
    if (System.getProperty("os.name").contains("Win")) {
      // Make sure that the executable path is absolute; otherwise ProcessBuilder fails on Windows.
      Iterator<String> argumentItr = command.getArgumentsList().iterator();
      if (argumentItr.hasNext()) {
        // Get the first element; this is the executable.
        String exe = argumentItr.next();
        arguments.add(workingDirectory.resolve(exe).toAbsolutePath().normalize().toString());
        argumentItr.forEachRemaining(arguments::add);
      }
    } else {
      arguments.addAll(command.getArgumentsList());
    }
    statusCode =
        executeCommand(
            operationName,
            workingDirectory,
            arguments.build(),
            command.getEnvironmentVariablesList(),
            limits,
            timeout,
            // executingMetadata.getStderrStreamName(),
            resultBuilder);
    // Based on configuration, we will decide whether remaining resources should be an error.
    if (workerContext.shouldErrorOperationOnRemainingResources()
        && resource.isReferenced()
        && statusCode == Code.OK) {
      // There should no longer be any references to the resource. Any references will be
      // killed upon close, but we must error the operation due to improper execution.
      // Per the gRPC spec, OUT_OF_RANGE means 'The operation was attempted past the valid
      // range', which seems appropriate here.
      statusCode = Code.OUT_OF_RANGE;
      operationContext
          .executeResponse
          .getStatusBuilder()
          .setMessage("command resources were referenced after execution completed");
    }
  } catch (IOException e) {
    logger.log(Level.SEVERE, format("error executing operation %s", operationName), e);
    operationContext.poller.pause();
    putError();
    return 0;
  }
  // switch poller to disable deadline
  operationContext.poller.pause();
  workerContext.resumePoller(
      operationContext.poller,
      "Executor(claim)",
      operationContext.queueEntry,
      ExecutionStage.Value.EXECUTING,
      () -> {},
      Deadline.after(10, DAYS));
  resultBuilder
      .getExecutionMetadataBuilder()
      .setExecutionCompletedTimestamp(Timestamps.fromMillis(System.currentTimeMillis()));
  long executeUSecs = stopwatch.elapsed(MICROSECONDS);
  logger.log(
      Level.FINE,
      String.format(
          "Executor::executeCommand(%s): Completed command: exit code %d",
          operationName, resultBuilder.getExitCode()));
  operationContext.executeResponse.getStatusBuilder().setCode(statusCode.getNumber());
  OperationContext reportOperationContext =
      operationContext.toBuilder().setOperation(operation).build();
  boolean claimed = owner.output().claim(reportOperationContext);
  operationContext.poller.pause();
  if (claimed) {
    try {
      owner.output().put(reportOperationContext);
    } catch (InterruptedException e) {
      owner.output().release();
      throw e;
    }
  } else {
    logger.log(Level.FINE, "Executor: Operation " + operationName + " Failed to claim output");
    boolean wasInterrupted = Thread.interrupted();
    try {
      putError();
    } finally {
      if (wasInterrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }
  return stopwatch.elapsed(MICROSECONDS) - executeUSecs;
}
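The Windows branch above rewrites argv[0] into an absolute, normalized path before ProcessBuilder sees it. A standalone sketch of the same java.nio.file manipulation, using made-up POSIX-style paths so it runs anywhere:

import java.nio.file.Path;
import java.nio.file.Paths;

class AbsoluteExecutableSketch {
  public static void main(String[] args) {
    // Hypothetical exec root and a relative executable taken from a command's argv[0].
    Path workingDirectory = Paths.get("/worker/exec/operation-root");
    String exe = "bazel-out/bin/tool";
    // Mirrors the branch above: resolve against the working directory, make the
    // path absolute, and normalize away any '.' or '..' segments.
    String absoluteExe =
        workingDirectory.resolve(exe).toAbsolutePath().normalize().toString();
    System.out.println(absoluteExe);
  }
}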
Use of build.bazel.remote.execution.v2.ExecutionPolicy in project bazel-buildfarm by bazelbuild.
From class ExecutionPolicies, method forPlatform.
public static Iterable<ExecutionPolicy> forPlatform(
    Platform platform, ExecutionPoliciesIndex policiesIndex) {
  ImmutableList.Builder<ExecutionPolicy> policies = ImmutableList.builder();
  policies.addAll(policiesIndex.get(DEFAULT_EXECUTION_POLICY_NAME));
  for (Property property : platform.getPropertiesList()) {
    if (property.getName().equals(EXECUTION_POLICY_PROPERTY_NAME)
        && !property.getValue().equals(DEFAULT_EXECUTION_POLICY_NAME)) {
      policies.addAll(policiesIndex.get(property.getValue()));
    }
  }
  return policies.build();
}
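A hedged sketch of how a platform property selects policies from the index. The property name "execution-policy" and the empty default name stand in for EXECUTION_POLICY_PROPERTY_NAME and DEFAULT_EXECUTION_POLICY_NAME, whose actual values are not shown above; the method reference assumes ExecutionPoliciesIndex is a functional interface mapping a policy name to an Iterable<ExecutionPolicy>, as the workerContext::getExecutionPolicies usage earlier suggests; and the ExecutionPolicy import follows this page's title, though the project may index its own ExecutionPolicy message here.

import build.bazel.remote.execution.v2.ExecutionPolicy;
import build.bazel.remote.execution.v2.Platform;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ListMultimap;

class ForPlatformSketch {
  public static void main(String[] args) {
    // Hypothetical index: the empty (default) name and a "sandboxed" name each map
    // to one policy.
    ListMultimap<String, ExecutionPolicy> index =
        ImmutableListMultimap.of(
            "", ExecutionPolicy.getDefaultInstance(),
            "sandboxed", ExecutionPolicy.getDefaultInstance());
    // A platform requesting the "sandboxed" policy via the assumed property name.
    Platform platform =
        Platform.newBuilder()
            .addProperties(
                Platform.Property.newBuilder().setName("execution-policy").setValue("sandboxed"))
            .build();
    // Expect the default policy followed by the "sandboxed" policy.
    Iterable<ExecutionPolicy> policies = ExecutionPolicies.forPlatform(platform, index::get);
    System.out.println(policies);
  }
}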
Use of build.bazel.remote.execution.v2.ExecutionPolicy in project bazel-buildfarm by bazelbuild.
From class ShardInstance, method execute.
@Override
public ListenableFuture<Void> execute(
    Digest actionDigest,
    boolean skipCacheLookup,
    ExecutionPolicy executionPolicy,
    ResultsCachePolicy resultsCachePolicy,
    RequestMetadata requestMetadata,
    Watcher watcher) {
  try {
    if (!backplane.canPrequeue()) {
      return immediateFailedFuture(
          Status.RESOURCE_EXHAUSTED.withDescription("Too many jobs pending").asException());
    }
    String operationName = createOperationName(UUID.randomUUID().toString());
    executionSuccess.inc();
    logger.log(
        Level.FINE,
        new StringBuilder()
            .append("ExecutionSuccess: ")
            .append(requestMetadata.getToolInvocationId())
            .append(" -> ")
            .append(operationName)
            .append(": ")
            .append(DigestUtil.toString(actionDigest))
            .toString());
    readThroughActionCache.invalidate(DigestUtil.asActionKey(actionDigest));
    if (!skipCacheLookup && recentCacheServedExecutions.getIfPresent(requestMetadata) != null) {
      logger.log(
          Level.FINE,
          format("Operation %s will have skip_cache_lookup = true due to retry", operationName));
      skipCacheLookup = true;
    }
    String stdoutStreamName = operationName + "/streams/stdout";
    String stderrStreamName = operationName + "/streams/stderr";
    ExecuteEntry executeEntry =
        ExecuteEntry.newBuilder()
            .setOperationName(operationName)
            .setActionDigest(actionDigest)
            .setExecutionPolicy(executionPolicy)
            .setResultsCachePolicy(resultsCachePolicy)
            .setSkipCacheLookup(skipCacheLookup)
            .setRequestMetadata(requestMetadata)
            .setStdoutStreamName(stdoutStreamName)
            .setStderrStreamName(stderrStreamName)
            .setQueuedTimestamp(Timestamps.fromMillis(System.currentTimeMillis()))
            .build();
    ExecuteOperationMetadata metadata =
        ExecuteOperationMetadata.newBuilder()
            .setActionDigest(actionDigest)
            .setStdoutStreamName(stdoutStreamName)
            .setStderrStreamName(stderrStreamName)
            .build();
    Operation operation =
        Operation.newBuilder().setName(operationName).setMetadata(Any.pack(metadata)).build();
    try {
      watcher.observe(operation);
    } catch (Exception e) {
      return immediateFailedFuture(e);
    }
    if (inDenyList(requestMetadata)) {
      watcher.observe(
          operation.toBuilder()
              .setDone(true)
              .setResponse(Any.pack(denyActionResponse(actionDigest, BLOCK_LIST_ERROR)))
              .build());
      return immediateFuture(null);
    }
    backplane.prequeue(executeEntry, operation);
    return watchOperation(
        operation,
        newActionResultWatcher(DigestUtil.asActionKey(actionDigest), watcher),
        /* initial= */ false);
  } catch (IOException e) {
    return immediateFailedFuture(e);
  }
}
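For context, a hedged caller-side sketch of submitting an execution through this method. The instance and action digest are assumed to exist already, the priority values and invocation id are illustrative, and the lambda assumes Watcher is a functional interface whose single method observes an Operation, as the watcher.observe(operation) calls above suggest.

import build.bazel.remote.execution.v2.Digest;
import build.bazel.remote.execution.v2.ExecutionPolicy;
import build.bazel.remote.execution.v2.RequestMetadata;
import build.bazel.remote.execution.v2.ResultsCachePolicy;
import com.google.common.util.concurrent.ListenableFuture;

class ExecuteCallSketch {
  // instance is a constructed ShardInstance; actionDigest identifies an Action
  // already uploaded to the CAS. Both are assumed to be supplied by the caller.
  static ListenableFuture<Void> submit(ShardInstance instance, Digest actionDigest) {
    return instance.execute(
        actionDigest,
        /* skipCacheLookup= */ false,
        ExecutionPolicy.newBuilder().setPriority(1).build(),
        ResultsCachePolicy.newBuilder().setPriority(1).build(),
        RequestMetadata.newBuilder().setToolInvocationId("invocation-1").build(),
        operation -> System.out.println(operation.getName() + " done=" + operation.getDone()));
  }
}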