Use of build.buildfarm.v1test.QueuedOperation in the bazel-buildfarm project by bazelbuild.
From the class ShardInstance, method transformAndQueue.
/**
 * Fetches the Action for an operation, transforms it into a QueuedOperation,
 * validates it, uploads the serialized QueuedOperation blob to the CAS, and
 * finally places the operation on the backplane queue.
 *
 * <p>Fix in this revision: the thread's interrupt status is restored when the
 * final queue step is interrupted, instead of silently swallowing
 * {@link InterruptedException}.
 *
 * @param executeEntry the execution request; carries the request metadata used
 *     for all CAS interactions
 * @param poller keep-alive poller for the operation; paused once the operation
 *     is queued or the pipeline fails
 * @param operation the operation being queued; its metadata must unpack to
 *     {@code ExecuteOperationMetadata} or the returned future fails immediately
 * @param stopwatch running stopwatch used for per-stage timing; assumed to have
 *     been started by the caller
 * @param timeout deadline applied to the QueuedOperation blob upload
 * @return a future completed with {@code null} once the operation is queued, or
 *     after the operation has been transitioned to an error state on failure
 */
private ListenableFuture<Void> transformAndQueue(
    ExecuteEntry executeEntry,
    Poller poller,
    Operation operation,
    Stopwatch stopwatch,
    Duration timeout) {
  long checkCacheUSecs = stopwatch.elapsed(MICROSECONDS);
  ExecuteOperationMetadata metadata;
  try {
    metadata = operation.getMetadata().unpack(ExecuteOperationMetadata.class);
  } catch (InvalidProtocolBufferException e) {
    return immediateFailedFuture(e);
  }
  Digest actionDigest = metadata.getActionDigest();
  SettableFuture<Void> queueFuture = SettableFuture.create();
  logger.log(
      Level.FINE,
      format(
          "ShardInstance(%s): queue(%s): fetching action %s",
          getName(), operation.getName(), actionDigest.getHash()));
  RequestMetadata requestMetadata = executeEntry.getRequestMetadata();
  // Fetch the Action; a missing action is surfaced as a precondition failure via
  // checkPreconditionFailure in the catching branch below.
  ListenableFuture<Action> actionFuture =
      catchingAsync(
          transformAsync(
              expectAction(actionDigest, requestMetadata),
              (action) -> {
                if (action == null) {
                  throw Status.NOT_FOUND.asException();
                } else if (action.getDoNotCache()) {
                  // invalidate our action cache result as well as watcher owner
                  readThroughActionCache.invalidate(DigestUtil.asActionKey(actionDigest));
                  backplane.putOperation(
                      operation.toBuilder().setMetadata(Any.pack(action)).build(),
                      metadata.getStage());
                }
                return immediateFuture(action);
              },
              operationTransformService),
          StatusException.class,
          (e) -> {
            Status st = Status.fromThrowable(e);
            if (st.getCode() == Code.NOT_FOUND) {
              PreconditionFailure.Builder preconditionFailure =
                  PreconditionFailure.newBuilder();
              preconditionFailure
                  .addViolationsBuilder()
                  .setType(VIOLATION_TYPE_MISSING)
                  .setSubject("blobs/" + DigestUtil.toString(actionDigest))
                  .setDescription(MISSING_ACTION);
              checkPreconditionFailure(actionDigest, preconditionFailure.build());
            }
            throw st.asRuntimeException();
          },
          operationTransformService);
  QueuedOperation.Builder queuedOperationBuilder = QueuedOperation.newBuilder();
  // Transform the Action into a QueuedOperation, recording how long the
  // transform itself took for the profiled metadata.
  ListenableFuture<ProfiledQueuedOperationMetadata.Builder> queuedFuture =
      transformAsync(
          actionFuture,
          (action) -> {
            logger.log(
                Level.FINE,
                format(
                    "ShardInstance(%s): queue(%s): fetched action %s transforming queuedOperation",
                    getName(), operation.getName(), actionDigest.getHash()));
            Stopwatch transformStopwatch = Stopwatch.createStarted();
            return transform(
                transformQueuedOperation(
                    operation.getName(),
                    action,
                    action.getCommandDigest(),
                    action.getInputRootDigest(),
                    queuedOperationBuilder,
                    operationTransformService,
                    requestMetadata),
                (queuedOperation) ->
                    ProfiledQueuedOperationMetadata.newBuilder()
                        .setQueuedOperation(queuedOperation)
                        .setQueuedOperationMetadata(
                            buildQueuedOperationMetadata(
                                metadata, requestMetadata, queuedOperation))
                        .setTransformedIn(
                            Durations.fromMicros(transformStopwatch.elapsed(MICROSECONDS))),
                operationTransformService);
          },
          operationTransformService);
  // Validate the transformed QueuedOperation synchronously on the transform
  // service, recording validation time.
  ListenableFuture<ProfiledQueuedOperationMetadata.Builder> validatedFuture =
      transformAsync(
          queuedFuture,
          (profiledQueuedMetadata) -> {
            logger.log(
                Level.FINE,
                format(
                    "ShardInstance(%s): queue(%s): queuedOperation %s transformed, validating",
                    getName(),
                    operation.getName(),
                    DigestUtil.toString(
                        profiledQueuedMetadata
                            .getQueuedOperationMetadata()
                            .getQueuedOperationDigest())));
            long startValidateUSecs = stopwatch.elapsed(MICROSECONDS);
            /* sync, throws StatusException */
            validateQueuedOperation(actionDigest, profiledQueuedMetadata.getQueuedOperation());
            return immediateFuture(
                profiledQueuedMetadata.setValidatedIn(
                    Durations.fromMicros(
                        stopwatch.elapsed(MICROSECONDS) - startValidateUSecs)));
          },
          operationTransformService);
  // Upload the serialized QueuedOperation so workers can fetch it by digest.
  ListenableFuture<ProfiledQueuedOperationMetadata> queuedOperationCommittedFuture =
      transformAsync(
          validatedFuture,
          (profiledQueuedMetadata) -> {
            logger.log(
                Level.FINE,
                format(
                    "ShardInstance(%s): queue(%s): queuedOperation %s validated, uploading",
                    getName(),
                    operation.getName(),
                    DigestUtil.toString(
                        profiledQueuedMetadata
                            .getQueuedOperationMetadata()
                            .getQueuedOperationDigest())));
            ByteString queuedOperationBlob =
                profiledQueuedMetadata.getQueuedOperation().toByteString();
            Digest queuedOperationDigest =
                profiledQueuedMetadata.getQueuedOperationMetadata().getQueuedOperationDigest();
            long startUploadUSecs = stopwatch.elapsed(MICROSECONDS);
            return transform(
                writeBlobFuture(
                    queuedOperationDigest, queuedOperationBlob, requestMetadata, timeout),
                (committedSize) ->
                    profiledQueuedMetadata
                        .setUploadedIn(
                            Durations.fromMicros(
                                stopwatch.elapsed(MICROSECONDS) - startUploadUSecs))
                        .build(),
                operationTransformService);
          },
          operationTransformService);
  // onQueue call?
  addCallback(
      queuedOperationCommittedFuture,
      new FutureCallback<ProfiledQueuedOperationMetadata>() {
        @Override
        public void onSuccess(ProfiledQueuedOperationMetadata profiledQueuedMetadata) {
          QueuedOperationMetadata queuedOperationMetadata =
              profiledQueuedMetadata.getQueuedOperationMetadata();
          Operation queueOperation =
              operation.toBuilder().setMetadata(Any.pack(queuedOperationMetadata)).build();
          QueueEntry queueEntry =
              QueueEntry.newBuilder()
                  .setExecuteEntry(executeEntry)
                  .setQueuedOperationDigest(queuedOperationMetadata.getQueuedOperationDigest())
                  .setPlatform(
                      profiledQueuedMetadata.getQueuedOperation().getCommand().getPlatform())
                  .build();
          try {
            ensureCanQueue(stopwatch);
            long startQueueUSecs = stopwatch.elapsed(MICROSECONDS);
            poller.pause();
            backplane.queue(queueEntry, queueOperation);
            long elapsedUSecs = stopwatch.elapsed(MICROSECONDS);
            long queueUSecs = elapsedUSecs - startQueueUSecs;
            logger.log(
                Level.FINE,
                format(
                    "ShardInstance(%s): queue(%s): %dus checkCache, %dus transform, %dus validate, %dus upload, %dus queue, %dus elapsed",
                    getName(),
                    queueOperation.getName(),
                    checkCacheUSecs,
                    Durations.toMicros(profiledQueuedMetadata.getTransformedIn()),
                    Durations.toMicros(profiledQueuedMetadata.getValidatedIn()),
                    Durations.toMicros(profiledQueuedMetadata.getUploadedIn()),
                    queueUSecs,
                    elapsedUSecs));
            queueFuture.set(null);
          } catch (IOException e) {
            onFailure(e.getCause() == null ? e : e.getCause());
          } catch (InterruptedException e) {
            // Restore the interrupt status so the executor can observe the
            // interruption; queueFuture is deliberately left incomplete here,
            // matching the prior behavior of abandoning the queue attempt.
            Thread.currentThread().interrupt();
          }
        }

        @Override
        public void onFailure(Throwable t) {
          poller.pause();
          com.google.rpc.Status status = StatusProto.fromThrowable(t);
          if (status == null) {
            logger.log(
                Level.SEVERE, "no rpc status from exception for " + operation.getName(), t);
            status = asExecutionStatus(t);
          } else if (com.google.rpc.Code.forNumber(status.getCode())
              == com.google.rpc.Code.DEADLINE_EXCEEDED) {
            // A deadline here reflects our own fetch/upload budget, not the
            // client's execution deadline; report UNAVAILABLE instead.
            logger.log(
                Level.WARNING,
                "an rpc status was thrown with DEADLINE_EXCEEDED for "
                    + operation.getName()
                    + ", discarding it",
                t);
            status =
                com.google.rpc.Status.newBuilder()
                    .setCode(com.google.rpc.Code.UNAVAILABLE.getNumber())
                    .setMessage("SUPPRESSED DEADLINE_EXCEEDED: " + t.getMessage())
                    .build();
          }
          logFailedStatus(actionDigest, status);
          errorOperationFuture(operation, requestMetadata, status, queueFuture);
        }
      },
      operationTransformService);
  return queueFuture;
}
Use of build.buildfarm.v1test.QueuedOperation in the bazel-buildfarm project by bazelbuild.
From the class ShardInstance, method validateAndRequeueOperation.
/**
 * Validates a previously-queued operation and places it back on the backplane
 * queue, rebuilding and re-uploading its QueuedOperation blob only if the
 * stored blob can no longer be fetched.
 *
 * <p>Fix in this revision: the unused, misspelled lambda parameter
 * {@code fechedQueuedOperation} is renamed to {@code ignored} to document that
 * it is deliberately discarded.
 *
 * @param operation the operation to requeue; its name must match the queue
 *     entry's execute entry (enforced by {@code checkState})
 * @param queueEntry the entry being requeued; carries the QueuedOperation
 *     digest and the originating ExecuteEntry
 * @param timeout deadline applied to a QueuedOperation re-upload, if needed
 * @return a future completed with {@code null} once the operation is requeued,
 *     or after it has been transitioned to an error state on failure
 */
private ListenableFuture<Void> validateAndRequeueOperation(
    Operation operation, QueueEntry queueEntry, Duration timeout) {
  ExecuteEntry executeEntry = queueEntry.getExecuteEntry();
  String operationName = executeEntry.getOperationName();
  checkState(operationName.equals(operation.getName()));
  RequestMetadata requestMetadata = executeEntry.getRequestMetadata();
  ListenableFuture<QueuedOperation> fetchQueuedOperationFuture =
      expect(
          queueEntry.getQueuedOperationDigest(),
          QueuedOperation.parser(),
          operationTransformService,
          requestMetadata);
  Digest actionDigest = executeEntry.getActionDigest();
  // Fall back to rebuilding the QueuedOperation from the Action when the
  // stored blob cannot be fetched.
  ListenableFuture<QueuedOperation> queuedOperationFuture =
      catchingAsync(
          fetchQueuedOperationFuture,
          Throwable.class,
          (e) ->
              buildQueuedOperation(
                  operation.getName(), actionDigest, operationTransformService, requestMetadata),
          directExecutor());
  PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder();
  ListenableFuture<QueuedOperation> validatedFuture =
      transformAsync(
          queuedOperationFuture,
          (queuedOperation) -> {
            /* sync, throws StatusException - must be serviced via non-OTS */
            validateQueuedOperationAndInputs(
                actionDigest, queuedOperation, preconditionFailure, requestMetadata);
            return immediateFuture(queuedOperation);
          },
          operationTransformService);
  // this little fork ensures that a successfully fetched QueuedOperation
  // will not be reuploaded
  ListenableFuture<QueuedOperationResult> uploadedFuture =
      transformAsync(
          validatedFuture,
          (queuedOperation) ->
              catchingAsync(
                  transform(
                      fetchQueuedOperationFuture,
                      // The fetched value itself is unused here; fetch success
                      // alone proves the blob exists and need not be reuploaded.
                      (ignored) -> {
                        QueuedOperationMetadata metadata =
                            QueuedOperationMetadata.newBuilder()
                                .setExecuteOperationMetadata(
                                    executeOperationMetadata(
                                        executeEntry, ExecutionStage.Value.QUEUED))
                                .setQueuedOperationDigest(queueEntry.getQueuedOperationDigest())
                                .setRequestMetadata(requestMetadata)
                                .build();
                        return new QueuedOperationResult(queueEntry, metadata);
                      },
                      operationTransformService),
                  Throwable.class,
                  (e) ->
                      uploadQueuedOperation(
                          queuedOperation, executeEntry, operationTransformService, timeout),
                  operationTransformService),
          directExecutor());
  SettableFuture<Void> requeuedFuture = SettableFuture.create();
  addCallback(
      uploadedFuture,
      new FutureCallback<QueuedOperationResult>() {
        @Override
        public void onSuccess(QueuedOperationResult result) {
          Operation queueOperation =
              operation.toBuilder().setMetadata(Any.pack(result.metadata)).build();
          try {
            backplane.queue(result.entry, queueOperation);
            requeuedFuture.set(null);
          } catch (IOException e) {
            onFailure(e);
          }
        }

        @Override
        public void onFailure(Throwable t) {
          logger.log(Level.SEVERE, "failed to requeue: " + operationName, t);
          com.google.rpc.Status status = StatusProto.fromThrowable(t);
          if (status == null) {
            logger.log(Level.SEVERE, "no rpc status from exception for " + operationName, t);
            status = asExecutionStatus(t);
          } else if (com.google.rpc.Code.forNumber(status.getCode())
              == com.google.rpc.Code.DEADLINE_EXCEEDED) {
            // A deadline here is ours, not the client's; report UNAVAILABLE.
            logger.log(
                Level.WARNING,
                "an rpc status was thrown with DEADLINE_EXCEEDED for "
                    + operationName
                    + ", discarding it",
                t);
            status =
                com.google.rpc.Status.newBuilder()
                    .setCode(com.google.rpc.Code.UNAVAILABLE.getNumber())
                    .setMessage("SUPPRESSED DEADLINE_EXCEEDED: " + t.getMessage())
                    .build();
          }
          logFailedStatus(actionDigest, status);
          SettableFuture<Void> errorFuture = SettableFuture.create();
          errorOperationFuture(operation, requestMetadata, status, errorFuture);
          // Requeue is considered "handled" once the error transition settles.
          errorFuture.addListener(() -> requeuedFuture.set(null), operationTransformService);
        }
      },
      operationTransformService);
  return requeuedFuture;
}
Use of build.buildfarm.v1test.QueuedOperation in the bazel-buildfarm project by bazelbuild.
From the class Cat, method printQueuedOperation.
/**
 * Parses a blob as a QueuedOperation and pretty-prints its action, command,
 * and tree sections to stdout; prints a short notice and returns if the blob
 * does not parse.
 *
 * @param blob serialized QueuedOperation bytes
 * @param digestUtil used to compute and display the embedded action's digest
 */
private static void printQueuedOperation(ByteString blob, DigestUtil digestUtil) {
  QueuedOperation parsed;
  try {
    parsed = QueuedOperation.parseFrom(blob);
  } catch (InvalidProtocolBufferException e) {
    System.out.println("Not a QueuedOperation");
    return;
  }
  Digest actionDigest = digestUtil.compute(parsed.getAction());
  System.out.println("QueuedOperation:");
  System.out.println(" Action: " + DigestUtil.toString(actionDigest));
  printAction(2, parsed.getAction());
  System.out.println(" Command:");
  printCommand(2, parsed.getCommand());
  System.out.println(" Tree:");
  printTree(2, parsed.getTree(), parsed.getTree().getRootDigest());
}
Use of build.buildfarm.v1test.QueuedOperation in the bazel-buildfarm project by bazelbuild.
From the class InputFetcher, method fetchPolled.
/**
 * Fetches the QueuedOperation for the current queue entry, materializes its
 * input tree into an exec dir, and hands off to the output stage.
 *
 * <p>Fix in this revision: the {@code IOException} from a failed exec-dir
 * cleanup is now passed to the logger instead of being dropped.
 *
 * @param stopwatch running stopwatch; used to report time spent beyond input
 *     fetch itself
 * @return microseconds consumed after input fetch completed (i.e. by
 *     proceedToOutput), or 0 when the operation was routed to the error stage
 * @throws InterruptedException if interrupted while erroring or proceeding
 */
private long fetchPolled(Stopwatch stopwatch) throws InterruptedException {
  String operationName = operationContext.queueEntry.getExecuteEntry().getOperationName();
  logger.log(Level.FINE, format("fetching inputs: %s", operationName));
  ExecutedActionMetadata.Builder executedAction =
      operationContext
          .executeResponse
          .getResultBuilder()
          .getExecutionMetadataBuilder()
          .setInputFetchStartTimestamp(Timestamps.fromMillis(System.currentTimeMillis()));
  final Map<Digest, Directory> directoriesIndex;
  QueuedOperation queuedOperation;
  Path execDir;
  try {
    queuedOperation = workerContext.getQueuedOperation(operationContext.queueEntry);
    if (queuedOperation == null || !isQueuedOperationValid(queuedOperation)) {
      if (queuedOperation != null) {
        logger.log(Level.SEVERE, format("invalid queued operation: %s", operationName));
      }
      owner.error().put(operationContext);
      return 0;
    }
    if (queuedOperation.hasTree()) {
      directoriesIndex =
          DigestUtil.proxyDirectoriesIndex(queuedOperation.getTree().getDirectoriesMap());
    } else {
      // TODO remove legacy interpretation and field after transition
      directoriesIndex =
          workerContext.getDigestUtil().createDirectoriesIndex(queuedOperation.getLegacyTree());
    }
    execDir =
        workerContext.createExecDir(
            operationName, directoriesIndex, queuedOperation.getAction(),
            queuedOperation.getCommand());
  } catch (IOException e) {
    logger.log(Level.SEVERE, format("error creating exec dir for %s", operationName), e);
    owner.error().put(operationContext);
    return 0;
  }
  success = true;
  /* tweak command executable used */
  String programName = queuedOperation.getCommand().getArguments(0);
  // NOTE(review): under the legacy-tree branch above, getTree() is the default
  // instance, so this lookup presumably yields null — confirm getExecutablePath
  // tolerates a null root in that case.
  Directory root = directoriesIndex.get(queuedOperation.getTree().getRootDigest());
  Command command =
      queuedOperation
          .getCommand()
          .toBuilder()
          .clearArguments()
          .addArguments(getExecutablePath(programName, root, directoriesIndex))
          .addAllArguments(Iterables.skip(queuedOperation.getCommand().getArgumentsList(), 1))
          .build();
  executedAction.setInputFetchCompletedTimestamp(Timestamps.fromMillis(System.currentTimeMillis()));
  // we are now responsible for destroying the exec dir if anything goes wrong
  boolean completed = false;
  try {
    long fetchUSecs = stopwatch.elapsed(MICROSECONDS);
    proceedToOutput(queuedOperation.getAction(), command, execDir);
    completed = true;
    return stopwatch.elapsed(MICROSECONDS) - fetchUSecs;
  } finally {
    if (!completed) {
      try {
        workerContext.destroyExecDir(execDir);
      } catch (IOException e) {
        // Include the exception so cleanup failures are diagnosable.
        logger.log(
            Level.SEVERE,
            format("error deleting exec dir for %s after interrupt", operationName),
            e);
      }
    }
  }
}
Use of build.buildfarm.v1test.QueuedOperation in the bazel-buildfarm project by bazelbuild.
From the class ShardInstanceTest, method requeueSucceedsForValidOperation.
@Test
public void requeueSucceedsForValidOperation() throws Exception {
  // An operation known to the backplane with a fetchable, valid QueuedOperation
  // should requeue without error.
  String operationName = "valid-operation";
  when(mockBackplane.getOperation(eq(operationName)))
      .thenReturn(Operation.newBuilder().setName(operationName).build());
  Action action = createAction();
  QueuedOperation queuedOperation =
      QueuedOperation.newBuilder().setAction(action).setCommand(SIMPLE_COMMAND).build();
  // Make the serialized QueuedOperation available in the CAS by its digest.
  ByteString serialized = queuedOperation.toByteString();
  Digest serializedDigest = DIGEST_UTIL.compute(serialized);
  provideBlob(serializedDigest, serialized);
  Digest actionDigest = DIGEST_UTIL.compute(action);
  QueueEntry entry =
      QueueEntry.newBuilder()
          .setExecuteEntry(
              ExecuteEntry.newBuilder()
                  .setOperationName(operationName)
                  .setSkipCacheLookup(true)
                  .setActionDigest(actionDigest))
          .setQueuedOperationDigest(serializedDigest)
          .build();
  instance.requeueOperation(entry, Durations.fromSeconds(60)).get();
}
Aggregations