Use of build.buildfarm.v1test.ExecuteEntry in project bazel-buildfarm by bazelbuild.
From the class ShardInstance, method requeueOperation.
@VisibleForTesting
public ListenableFuture<Void> requeueOperation(QueueEntry queueEntry, Duration timeout) {
  ListenableFuture<Void> future;
  ExecuteEntry executeEntry = queueEntry.getExecuteEntry();
  Operation operation = getOperation(executeEntry.getOperationName());
  try {
    // check preconditions before trying to requeue.
    boolean canRequeue = canOperationBeRequeued(queueEntry, executeEntry, operation);
    if (!canRequeue) {
      return IMMEDIATE_VOID_FUTURE;
    }
    // Requeue the action as long as the result is not already cached.
    ActionKey actionKey = DigestUtil.asActionKey(executeEntry.getActionDigest());
    ListenableFuture<Boolean> cachedResultFuture;
    if (executeEntry.getSkipCacheLookup()) {
      cachedResultFuture = immediateFuture(false);
    } else {
      cachedResultFuture =
          checkCacheFuture(actionKey, operation, executeEntry.getRequestMetadata());
    }
    future = transformAsync(cachedResultFuture, (cachedResult) -> {
      if (cachedResult) {
        return IMMEDIATE_VOID_FUTURE;
      }
      return validateAndRequeueOperation(operation, queueEntry, timeout);
    }, operationTransformService);
  } catch (IOException | StatusRuntimeException e) {
    return immediateFailedFuture(e);
  }
  return future;
}
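
The interesting shape here is the short-circuit on a cached result: the boolean future from the cache check is transformed into either an immediate no-op or the actual requeue work. A self-contained sketch of that pattern with plain Guava futures follows; requeueUnlessCached and doRequeue are illustrative placeholders, not buildfarm APIs.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class ShortCircuitSketch {
  private static final ListenableFuture<Void> IMMEDIATE_VOID_FUTURE =
      Futures.immediateFuture(null);

  static ListenableFuture<Void> requeueUnlessCached(
      ListenableFuture<Boolean> cachedResultFuture, ListeningExecutorService service) {
    return Futures.transformAsync(
        cachedResultFuture,
        (cachedResult) -> cachedResult
            ? IMMEDIATE_VOID_FUTURE // result already cached: nothing to requeue
            : doRequeue(service),   // otherwise validate and requeue
        service);
  }

  // Placeholder standing in for validateAndRequeueOperation.
  static ListenableFuture<Void> doRequeue(ListeningExecutorService service) {
    return service.submit(() -> (Void) null);
  }

  public static void main(String[] args) throws Exception {
    ListeningExecutorService service =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    requeueUnlessCached(Futures.immediateFuture(false), service).get();
    service.shutdown();
  }
}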
Use of build.buildfarm.v1test.ExecuteEntry in project bazel-buildfarm by bazelbuild.
From the class ShardInstance, method transformAndQueue.
private ListenableFuture<Void> transformAndQueue(
    ExecuteEntry executeEntry, Poller poller, Operation operation, Stopwatch stopwatch, Duration timeout) {
  long checkCacheUSecs = stopwatch.elapsed(MICROSECONDS);
  ExecuteOperationMetadata metadata;
  try {
    metadata = operation.getMetadata().unpack(ExecuteOperationMetadata.class);
  } catch (InvalidProtocolBufferException e) {
    return immediateFailedFuture(e);
  }
  Digest actionDigest = metadata.getActionDigest();
  SettableFuture<Void> queueFuture = SettableFuture.create();
  logger.log(Level.FINE, format("ShardInstance(%s): queue(%s): fetching action %s",
      getName(), operation.getName(), actionDigest.getHash()));
  RequestMetadata requestMetadata = executeEntry.getRequestMetadata();
  ListenableFuture<Action> actionFuture = catchingAsync(
      transformAsync(expectAction(actionDigest, requestMetadata), (action) -> {
        if (action == null) {
          throw Status.NOT_FOUND.asException();
        } else if (action.getDoNotCache()) {
          // invalidate our action cache result as well as watcher owner
          readThroughActionCache.invalidate(DigestUtil.asActionKey(actionDigest));
          backplane.putOperation(
              operation.toBuilder().setMetadata(Any.pack(action)).build(), metadata.getStage());
        }
        return immediateFuture(action);
      }, operationTransformService),
      StatusException.class,
      (e) -> {
        Status st = Status.fromThrowable(e);
        if (st.getCode() == Code.NOT_FOUND) {
          PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder();
          preconditionFailure.addViolationsBuilder()
              .setType(VIOLATION_TYPE_MISSING)
              .setSubject("blobs/" + DigestUtil.toString(actionDigest))
              .setDescription(MISSING_ACTION);
          checkPreconditionFailure(actionDigest, preconditionFailure.build());
        }
        throw st.asRuntimeException();
      },
      operationTransformService);
  QueuedOperation.Builder queuedOperationBuilder = QueuedOperation.newBuilder();
  ListenableFuture<ProfiledQueuedOperationMetadata.Builder> queuedFuture =
      transformAsync(actionFuture, (action) -> {
        logger.log(Level.FINE, format(
            "ShardInstance(%s): queue(%s): fetched action %s transforming queuedOperation",
            getName(), operation.getName(), actionDigest.getHash()));
        Stopwatch transformStopwatch = Stopwatch.createStarted();
        return transform(
            transformQueuedOperation(operation.getName(), action, action.getCommandDigest(),
                action.getInputRootDigest(), queuedOperationBuilder, operationTransformService, requestMetadata),
            (queuedOperation) -> ProfiledQueuedOperationMetadata.newBuilder()
                .setQueuedOperation(queuedOperation)
                .setQueuedOperationMetadata(buildQueuedOperationMetadata(metadata, requestMetadata, queuedOperation))
                .setTransformedIn(Durations.fromMicros(transformStopwatch.elapsed(MICROSECONDS))),
            operationTransformService);
      }, operationTransformService);
  ListenableFuture<ProfiledQueuedOperationMetadata.Builder> validatedFuture =
      transformAsync(queuedFuture, (profiledQueuedMetadata) -> {
        logger.log(Level.FINE, format(
            "ShardInstance(%s): queue(%s): queuedOperation %s transformed, validating",
            getName(), operation.getName(),
            DigestUtil.toString(profiledQueuedMetadata.getQueuedOperationMetadata().getQueuedOperationDigest())));
        long startValidateUSecs = stopwatch.elapsed(MICROSECONDS);
        /* sync, throws StatusException */
        validateQueuedOperation(actionDigest, profiledQueuedMetadata.getQueuedOperation());
        return immediateFuture(profiledQueuedMetadata.setValidatedIn(
            Durations.fromMicros(stopwatch.elapsed(MICROSECONDS) - startValidateUSecs)));
      }, operationTransformService);
  ListenableFuture<ProfiledQueuedOperationMetadata> queuedOperationCommittedFuture =
      transformAsync(validatedFuture, (profiledQueuedMetadata) -> {
        logger.log(Level.FINE, format(
            "ShardInstance(%s): queue(%s): queuedOperation %s validated, uploading",
            getName(), operation.getName(),
            DigestUtil.toString(profiledQueuedMetadata.getQueuedOperationMetadata().getQueuedOperationDigest())));
        ByteString queuedOperationBlob = profiledQueuedMetadata.getQueuedOperation().toByteString();
        Digest queuedOperationDigest =
            profiledQueuedMetadata.getQueuedOperationMetadata().getQueuedOperationDigest();
        long startUploadUSecs = stopwatch.elapsed(MICROSECONDS);
        return transform(
            writeBlobFuture(queuedOperationDigest, queuedOperationBlob, requestMetadata, timeout),
            (committedSize) -> profiledQueuedMetadata
                .setUploadedIn(Durations.fromMicros(stopwatch.elapsed(MICROSECONDS) - startUploadUSecs))
                .build(),
            operationTransformService);
      }, operationTransformService);
  // onQueue call?
  addCallback(queuedOperationCommittedFuture, new FutureCallback<ProfiledQueuedOperationMetadata>() {
    @Override
    public void onSuccess(ProfiledQueuedOperationMetadata profiledQueuedMetadata) {
      QueuedOperationMetadata queuedOperationMetadata =
          profiledQueuedMetadata.getQueuedOperationMetadata();
      Operation queueOperation =
          operation.toBuilder().setMetadata(Any.pack(queuedOperationMetadata)).build();
      QueueEntry queueEntry = QueueEntry.newBuilder()
          .setExecuteEntry(executeEntry)
          .setQueuedOperationDigest(queuedOperationMetadata.getQueuedOperationDigest())
          .setPlatform(profiledQueuedMetadata.getQueuedOperation().getCommand().getPlatform())
          .build();
      try {
        ensureCanQueue(stopwatch);
        long startQueueUSecs = stopwatch.elapsed(MICROSECONDS);
        poller.pause();
        backplane.queue(queueEntry, queueOperation);
        long elapsedUSecs = stopwatch.elapsed(MICROSECONDS);
        long queueUSecs = elapsedUSecs - startQueueUSecs;
        logger.log(Level.FINE, format(
            "ShardInstance(%s): queue(%s): %dus checkCache, %dus transform, %dus validate, %dus upload, %dus queue, %dus elapsed",
            getName(), queueOperation.getName(), checkCacheUSecs,
            Durations.toMicros(profiledQueuedMetadata.getTransformedIn()),
            Durations.toMicros(profiledQueuedMetadata.getValidatedIn()),
            Durations.toMicros(profiledQueuedMetadata.getUploadedIn()),
            queueUSecs, elapsedUSecs));
        queueFuture.set(null);
      } catch (IOException e) {
        onFailure(e.getCause() == null ? e : e.getCause());
      } catch (InterruptedException e) {
        // ignore
      }
    }

    @Override
    public void onFailure(Throwable t) {
      poller.pause();
      com.google.rpc.Status status = StatusProto.fromThrowable(t);
      if (status == null) {
        logger.log(Level.SEVERE, "no rpc status from exception for " + operation.getName(), t);
        status = asExecutionStatus(t);
      } else if (com.google.rpc.Code.forNumber(status.getCode()) == com.google.rpc.Code.DEADLINE_EXCEEDED) {
        logger.log(Level.WARNING,
            "an rpc status was thrown with DEADLINE_EXCEEDED for " + operation.getName() + ", discarding it", t);
        status = com.google.rpc.Status.newBuilder()
            .setCode(com.google.rpc.Code.UNAVAILABLE.getNumber())
            .setMessage("SUPPRESSED DEADLINE_EXCEEDED: " + t.getMessage())
            .build();
      }
      logFailedStatus(actionDigest, status);
      errorOperationFuture(operation, requestMetadata, status, queueFuture);
    }
  }, operationTransformService);
  return queueFuture;
}
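
Before any of the transform chain runs, the method unpacks the operation's Any metadata into ExecuteOperationMetadata and fails the returned future if the payload is a different type. A minimal standalone sketch of that pack/unpack step, using an illustrative digest value:

import build.bazel.remote.execution.v2.Digest;
import build.bazel.remote.execution.v2.ExecuteOperationMetadata;
import com.google.longrunning.Operation;
import com.google.protobuf.Any;
import com.google.protobuf.InvalidProtocolBufferException;

public class MetadataUnpackSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    ExecuteOperationMetadata metadata = ExecuteOperationMetadata.newBuilder()
        .setActionDigest(Digest.newBuilder().setHash("abc123").setSizeBytes(42))
        .build();

    // Pack the metadata into the operation, as the server does when it creates it.
    Operation operation = Operation.newBuilder()
        .setName("example-operation")
        .setMetadata(Any.pack(metadata))
        .build();

    // Mirrors: operation.getMetadata().unpack(ExecuteOperationMetadata.class)
    // unpack throws InvalidProtocolBufferException if the Any holds another type.
    ExecuteOperationMetadata unpacked =
        operation.getMetadata().unpack(ExecuteOperationMetadata.class);
    System.out.println(unpacked.getActionDigest().getHash()); // abc123
  }
}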
Use of build.buildfarm.v1test.ExecuteEntry in project bazel-buildfarm by bazelbuild.
From the class ShardInstance, method validateAndRequeueOperation.
private ListenableFuture<Void> validateAndRequeueOperation(
    Operation operation, QueueEntry queueEntry, Duration timeout) {
  ExecuteEntry executeEntry = queueEntry.getExecuteEntry();
  String operationName = executeEntry.getOperationName();
  checkState(operationName.equals(operation.getName()));
  RequestMetadata requestMetadata = executeEntry.getRequestMetadata();
  ListenableFuture<QueuedOperation> fetchQueuedOperationFuture = expect(
      queueEntry.getQueuedOperationDigest(),
      QueuedOperation.parser(),
      operationTransformService,
      requestMetadata);
  Digest actionDigest = executeEntry.getActionDigest();
  ListenableFuture<QueuedOperation> queuedOperationFuture = catchingAsync(
      fetchQueuedOperationFuture,
      Throwable.class,
      (e) -> buildQueuedOperation(operation.getName(), actionDigest, operationTransformService, requestMetadata),
      directExecutor());
  PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder();
  ListenableFuture<QueuedOperation> validatedFuture =
      transformAsync(queuedOperationFuture, (queuedOperation) -> {
        /* sync, throws StatusException - must be serviced via non-OTS */
        validateQueuedOperationAndInputs(actionDigest, queuedOperation, preconditionFailure, requestMetadata);
        return immediateFuture(queuedOperation);
      }, operationTransformService);
  // this little fork ensures that a successfully fetched QueuedOperation
  // will not be reuploaded
  ListenableFuture<QueuedOperationResult> uploadedFuture = transformAsync(
      validatedFuture,
      (queuedOperation) -> catchingAsync(
          transform(fetchQueuedOperationFuture, (fetchedQueuedOperation) -> {
            QueuedOperationMetadata metadata = QueuedOperationMetadata.newBuilder()
                .setExecuteOperationMetadata(executeOperationMetadata(executeEntry, ExecutionStage.Value.QUEUED))
                .setQueuedOperationDigest(queueEntry.getQueuedOperationDigest())
                .setRequestMetadata(requestMetadata)
                .build();
            return new QueuedOperationResult(queueEntry, metadata);
          }, operationTransformService),
          Throwable.class,
          (e) -> uploadQueuedOperation(queuedOperation, executeEntry, operationTransformService, timeout),
          operationTransformService),
      directExecutor());
  SettableFuture<Void> requeuedFuture = SettableFuture.create();
  addCallback(uploadedFuture, new FutureCallback<QueuedOperationResult>() {
    @Override
    public void onSuccess(QueuedOperationResult result) {
      Operation queueOperation = operation.toBuilder().setMetadata(Any.pack(result.metadata)).build();
      try {
        backplane.queue(result.entry, queueOperation);
        requeuedFuture.set(null);
      } catch (IOException e) {
        onFailure(e);
      }
    }

    @Override
    public void onFailure(Throwable t) {
      logger.log(Level.SEVERE, "failed to requeue: " + operationName, t);
      com.google.rpc.Status status = StatusProto.fromThrowable(t);
      if (status == null) {
        logger.log(Level.SEVERE, "no rpc status from exception for " + operationName, t);
        status = asExecutionStatus(t);
      } else if (com.google.rpc.Code.forNumber(status.getCode()) == com.google.rpc.Code.DEADLINE_EXCEEDED) {
        logger.log(Level.WARNING,
            "an rpc status was thrown with DEADLINE_EXCEEDED for " + operationName + ", discarding it", t);
        status = com.google.rpc.Status.newBuilder()
            .setCode(com.google.rpc.Code.UNAVAILABLE.getNumber())
            .setMessage("SUPPRESSED DEADLINE_EXCEEDED: " + t.getMessage())
            .build();
      }
      logFailedStatus(actionDigest, status);
      SettableFuture<Void> errorFuture = SettableFuture.create();
      errorOperationFuture(operation, requestMetadata, status, errorFuture);
      errorFuture.addListener(() -> requeuedFuture.set(null), operationTransformService);
    }
  }, operationTransformService);
  return requeuedFuture;
}
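
The "little fork" called out in the comment is a catchingAsync fallback: a QueuedOperation that was fetched successfully is reused as-is, and only a failed fetch falls back to rebuilding (and, in the real method, re-uploading) it. A simplified sketch of that fetch-or-rebuild shape; the fetch future and rebuilt value are placeholders for expect(...) and buildQueuedOperation(...):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class FetchOrRebuildSketch {
  static ListenableFuture<String> fetchOrRebuild(
      ListenableFuture<String> fetchFuture, String rebuilt) {
    return Futures.catchingAsync(
        fetchFuture,
        Throwable.class,
        e -> Futures.immediateFuture(rebuilt), // fetch failed: fall back to the rebuilt value
        MoreExecutors.directExecutor());
  }

  public static void main(String[] args) throws Exception {
    // A failed fetch takes the fallback path.
    ListenableFuture<String> failed =
        Futures.immediateFailedFuture(new RuntimeException("missing blob"));
    System.out.println(fetchOrRebuild(failed, "rebuilt").get()); // rebuilt

    // A successful fetch is passed through untouched.
    ListenableFuture<String> fetched = Futures.immediateFuture("fetched");
    System.out.println(fetchOrRebuild(fetched, "rebuilt").get()); // fetched
  }
}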
Use of build.buildfarm.v1test.ExecuteEntry in project bazel-buildfarm by bazelbuild.
From the class RedisShardBackplane, method deprequeueOperation.
private ExecuteEntry deprequeueOperation(JedisCluster jedis) throws InterruptedException {
  String executeEntryJson = prequeue.dequeue(jedis);
  if (executeEntryJson == null) {
    return null;
  }
  ExecuteEntry.Builder executeEntryBuilder = ExecuteEntry.newBuilder();
  try {
    JsonFormat.parser().merge(executeEntryJson, executeEntryBuilder);
    ExecuteEntry executeEntry = executeEntryBuilder.build();
    String operationName = executeEntry.getOperationName();
    Operation operation = keepaliveOperation(operationName);
    // publish so that watchers reset their timeout
    publishReset(jedis, operation);
    // destroy the processing entry and ttl
    if (!prequeue.removeFromDequeue(jedis, executeEntryJson)) {
      logger.log(Level.SEVERE,
          format("could not remove %s from %s", operationName, prequeue.getDequeueName()));
      return null;
    }
    processingOperations.remove(jedis, operationName);
    return executeEntry;
  } catch (InvalidProtocolBufferException e) {
    logger.log(Level.SEVERE, "error parsing execute entry", e);
    return null;
  }
}
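
The prequeue stores each ExecuteEntry as JSON, and deprequeueOperation reconstructs it with JsonFormat.parser().merge. A small round-trip sketch, assuming the entry was serialized with JsonFormat.printer() when it was prequeued (only the parse side appears in this snippet, so the write format is an assumption); the field values are illustrative:

import build.buildfarm.v1test.ExecuteEntry;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.util.JsonFormat;

public class ExecuteEntryJsonRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    ExecuteEntry original = ExecuteEntry.newBuilder()
        .setOperationName("example-operation")
        .setSkipCacheLookup(true)
        .build();

    // Serialize the entry to JSON, presumably how it lands in the prequeue.
    String json = JsonFormat.printer().print(original);

    // Parse it back the way deprequeueOperation does.
    ExecuteEntry.Builder builder = ExecuteEntry.newBuilder();
    JsonFormat.parser().merge(json, builder);
    ExecuteEntry parsed = builder.build();

    System.out.println(parsed.equals(original)); // true
  }
}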
Use of build.buildfarm.v1test.ExecuteEntry in project bazel-buildfarm by bazelbuild.
From the class ShardInstanceTest, method queueActionMissingErrorsOperation.
@Test
public void queueActionMissingErrorsOperation() throws Exception {
  Action action = createAction(false);
  Digest actionDigest = DIGEST_UTIL.compute(action);
  ExecuteEntry executeEntry = ExecuteEntry.newBuilder()
      .setOperationName("missing-action-operation")
      .setActionDigest(actionDigest)
      .setSkipCacheLookup(true)
      .build();
  when(mockBackplane.canQueue()).thenReturn(true);
  Poller poller = mock(Poller.class);
  boolean failedPreconditionExceptionCaught = false;
  try {
    instance.queue(executeEntry, poller, DEFAULT_TIMEOUT).get(QUEUE_TEST_TIMEOUT_SECONDS, SECONDS);
  } catch (ExecutionException e) {
    com.google.rpc.Status status = StatusProto.fromThrowable(e);
    if (status.getCode() == Code.FAILED_PRECONDITION.getNumber()) {
      failedPreconditionExceptionCaught = true;
    } else {
      e.getCause().printStackTrace();
    }
  }
  assertThat(failedPreconditionExceptionCaught).isTrue();
  PreconditionFailure preconditionFailure = PreconditionFailure.newBuilder()
      .addViolations(Violation.newBuilder()
          .setType(VIOLATION_TYPE_MISSING)
          .setSubject("blobs/" + DigestUtil.toString(actionDigest))
          .setDescription(MISSING_ACTION))
      .build();
  ExecuteResponse executeResponse = ExecuteResponse.newBuilder()
      .setStatus(com.google.rpc.Status.newBuilder()
          .setCode(Code.FAILED_PRECONDITION.getNumber())
          .setMessage(invalidActionVerboseMessage(actionDigest, preconditionFailure))
          .addDetails(Any.pack(preconditionFailure)))
      .build();
  assertResponse(executeResponse);
  verify(poller, atLeastOnce()).pause();
}
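
The assertion hinges on StatusProto.fromThrowable recovering a FAILED_PRECONDITION com.google.rpc.Status whose details carry the packed PreconditionFailure. A sketch of that status shape, round-tripped through a StatusRuntimeException roughly the way the ExecutionException cause carries it in the test; the violation type, subject, and message strings are illustrative values, not the buildfarm constants:

import com.google.protobuf.Any;
import com.google.rpc.Code;
import com.google.rpc.PreconditionFailure;
import io.grpc.StatusRuntimeException;
import io.grpc.protobuf.StatusProto;

public class PreconditionStatusSketch {
  public static void main(String[] args) throws Exception {
    PreconditionFailure preconditionFailure = PreconditionFailure.newBuilder()
        .addViolations(PreconditionFailure.Violation.newBuilder()
            .setType("MISSING")
            .setSubject("blobs/deadbeef/42")
            .setDescription("The action was not found in the CAS."))
        .build();

    com.google.rpc.Status status = com.google.rpc.Status.newBuilder()
        .setCode(Code.FAILED_PRECONDITION.getNumber())
        .setMessage("action not found")
        .addDetails(Any.pack(preconditionFailure))
        .build();

    // Convert to the exception form gRPC would deliver, then recover the status
    // from the throwable the way the test does.
    StatusRuntimeException thrown = StatusProto.toStatusRuntimeException(status);
    com.google.rpc.Status recovered = StatusProto.fromThrowable(thrown);
    System.out.println(recovered.getCode() == Code.FAILED_PRECONDITION.getNumber()); // true

    // The packed PreconditionFailure survives the round trip in the details.
    PreconditionFailure details = recovered.getDetails(0).unpack(PreconditionFailure.class);
    System.out.println(details.getViolations(0).getType()); // MISSING
  }
}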