Use of the Digest message in project bazel-buildfarm by bazelbuild. (Note: ShardInstance uses the Remote Execution API Digest, build.bazel.remote.execution.v2.Digest — not com.google.cloud.kms.v1.Digest as the index suggests.)
The class ShardInstance, method findMissingBlobs.
/**
 * Determines which of the given blob digests are not present in the CAS.
 *
 * <p>Empty (size-0) blobs are filtered out up front and never reported missing. The remaining
 * digests are checked against a randomly shuffled deque of backplane workers via
 * {@code findMissingBlobsOnWorker}, which resolves the returned future.
 *
 * @param blobDigests digests to check for presence
 * @param requestMetadata metadata used for deny-list screening and downstream requests
 * @return future of the digests that no worker holds; fails with a gRPC status on backplane
 *     or deny-list I/O errors
 */
@Override
public ListenableFuture<Iterable<Digest>> findMissingBlobs(
    Iterable<Digest> blobDigests, RequestMetadata requestMetadata) {
  try {
    if (inDenyList(requestMetadata)) {
      // Denied requests currently report "nothing missing" rather than failing.
      // TODO change this back to a transient when #10663 is landed
      return immediateFuture(ImmutableList.of());
      /*
      return immediateFailedFuture(Status.UNAVAILABLE
          .withDescription("The action associated with this request is forbidden")
          .asException());
      */
    }
  } catch (IOException e) {
    return immediateFailedFuture(Status.fromThrowable(e).asException());
  }
  // The empty blob is a CAS identity; never report it missing.
  Iterable<Digest> nonEmptyDigests =
      Iterables.filter(blobDigests, (digest) -> digest.getSizeBytes() > 0);
  if (Iterables.isEmpty(nonEmptyDigests)) {
    return immediateFuture(ImmutableList.of());
  }
  Deque<String> workers;
  try {
    Set<String> workerSet = backplane.getWorkers();
    List<String> workersList;
    // Snapshot under the set's own lock; the backplane may mutate it concurrently.
    synchronized (workerSet) {
      workersList = new ArrayList<>(workerSet);
    }
    // Shuffle to spread query load across workers.
    Collections.shuffle(workersList, rand);
    workers = new ArrayDeque<>(workersList); // fixed: was a raw-type ArrayDeque
  } catch (IOException e) {
    return immediateFailedFuture(Status.fromThrowable(e).asException());
  }
  if (workers.isEmpty()) {
    // No workers available: every non-empty digest is missing.
    return immediateFuture(nonEmptyDigests);
  }
  SettableFuture<Iterable<Digest>> missingDigestsFuture = SettableFuture.create();
  findMissingBlobsOnWorker(
      UUID.randomUUID().toString(),
      nonEmptyDigests,
      workers,
      ImmutableList.builder(),
      Iterables.size(nonEmptyDigests),
      Context.current().fixedContextExecutor(directExecutor()),
      missingDigestsFuture,
      requestMetadata);
  return missingDigestsFuture;
}
Use of the Digest message (Remote Execution API) in project bazel-buildfarm by bazelbuild.
The class ShardInstance, method queue.
/**
 * Queues an execution, first consulting the action cache unless the entry opts out.
 *
 * <p>On a cache hit the poller is paused and the elapsed check-cache time is logged; the
 * returned future completes immediately. On a miss (or skipped lookup) the operation is
 * transformed and placed on the queue via {@code transformAndQueue}.
 *
 * @param executeEntry the execution request to queue
 * @param poller keepalive poller for the operation, paused on a cache hit
 * @param timeout budget forwarded to the transform/queue step
 * @return future completing when the operation is either served from cache or queued
 */
@VisibleForTesting
public ListenableFuture<Void> queue(ExecuteEntry executeEntry, Poller poller, Duration timeout) {
  ExecuteOperationMetadata executeMetadata =
      ExecuteOperationMetadata.newBuilder()
          .setActionDigest(executeEntry.getActionDigest())
          .setStdoutStreamName(executeEntry.getStdoutStreamName())
          .setStderrStreamName(executeEntry.getStderrStreamName())
          .build();
  Operation queuedOperation =
      Operation.newBuilder()
          .setName(executeEntry.getOperationName())
          .setMetadata(Any.pack(executeMetadata))
          .build();
  ActionKey cacheKey = DigestUtil.asActionKey(executeEntry.getActionDigest());
  Stopwatch elapsed = Stopwatch.createStarted();
  // Skip the action-cache probe entirely when the client asked us to.
  ListenableFuture<Boolean> cacheHitFuture =
      executeEntry.getSkipCacheLookup()
          ? immediateFuture(false)
          : checkCacheFuture(cacheKey, queuedOperation, executeEntry.getRequestMetadata());
  return transformAsync(
      cacheHitFuture,
      (hit) -> {
        if (!hit) {
          return transformAndQueue(executeEntry, poller, queuedOperation, elapsed, timeout);
        }
        // Served from cache: stop keepalives and record how long the lookup took.
        poller.pause();
        long checkCacheUSecs = elapsed.elapsed(MICROSECONDS);
        logger.log(
            Level.FINE,
            format(
                "ShardInstance(%s): checkCache(%s): %sus elapsed",
                getName(), queuedOperation.getName(), checkCacheUSecs));
        return IMMEDIATE_VOID_FUTURE;
      },
      operationTransformService);
}
Use of the Digest message (Remote Execution API) in project bazel-buildfarm by bazelbuild.
The class ShardInstance, method uploadQueuedOperation.
/**
 * Serializes and uploads a QueuedOperation blob to the CAS, producing the queue entry and
 * metadata that reference its digest.
 *
 * @param queuedOperation the operation payload to serialize and store
 * @param executeEntry originating execute request (supplies request metadata and platform link)
 * @param service executor for the upload-completion transform
 * @param timeout write deadline for the blob upload
 * @return future of the paired queue entry and queued-stage metadata
 * @throws EntryLimitException if the blob exceeds entry limits
 */
private ListenableFuture<QueuedOperationResult> uploadQueuedOperation(
    QueuedOperation queuedOperation,
    ExecuteEntry executeEntry,
    ExecutorService service,
    Duration timeout)
    throws EntryLimitException {
  ByteString serialized = queuedOperation.toByteString();
  Digest serializedDigest = getDigestUtil().compute(serialized);
  QueueEntry queueEntry =
      QueueEntry.newBuilder()
          .setExecuteEntry(executeEntry)
          .setQueuedOperationDigest(serializedDigest)
          .setPlatform(queuedOperation.getCommand().getPlatform())
          .build();
  QueuedOperationMetadata queuedMetadata =
      QueuedOperationMetadata.newBuilder()
          .setExecuteOperationMetadata(
              executeOperationMetadata(executeEntry, ExecutionStage.Value.QUEUED))
          .setQueuedOperationDigest(serializedDigest)
          .build();
  ListenableFuture<Long> writeFuture =
      writeBlobFuture(serializedDigest, serialized, executeEntry.getRequestMetadata(), timeout);
  // The committed size is irrelevant here; completion alone signals a successful upload.
  return transform(
      writeFuture, (committedSize) -> new QueuedOperationResult(queueEntry, queuedMetadata), service);
}
Use of the Digest message (Remote Execution API) in project bazel-buildfarm by bazelbuild.
The class ShardInstance, method getTreeFuture.
/**
 * Asynchronously fetches the full directory tree rooted at {@code inputRoot}.
 *
 * <p>Performs a breadth-first expansion: each fetched Directory is recorded in the Tree
 * builder keyed by its digest hash, and its child directory digests are enqueued (deduplicated
 * via a concurrent set). {@code TreeCallback.next()} drives one fetch at a time, re-invoking
 * itself through the callback until the queue drains or the future is done.
 *
 * @param reason diagnostic reason forwarded to directory fetches
 * @param inputRoot digest of the root directory
 * @param service executor for fetch transforms and callbacks
 * @param requestMetadata metadata forwarded to directory fetches
 * @return future of the assembled Tree (root digest plus all reachable directories)
 */
@Override
protected ListenableFuture<Tree> getTreeFuture(
    String reason, Digest inputRoot, ExecutorService service, RequestMetadata requestMetadata) {
  SettableFuture<Void> future = SettableFuture.create();
  Tree.Builder tree = Tree.newBuilder().setRootDigest(inputRoot);
  // Seen-set guards against enqueueing the same directory twice.
  // NOTE(review): inputRoot itself is never added to this set — presumably directory
  // graphs are acyclic and never reference their own root; confirm against callers.
  Set<Digest> digests = Sets.newConcurrentHashSet();
  Queue<Digest> remaining = new ConcurrentLinkedQueue<>(); // fixed: was a raw-type queue
  remaining.offer(inputRoot);
  // Capture the caller's gRPC context so each fetch runs under it.
  Context ctx = Context.current();
  TreeCallback callback = new TreeCallback(future) {
    @Override
    protected void onDirectory(Digest digest, Directory directory) {
      tree.putDirectories(digest.getHash(), directory);
      for (DirectoryNode childNode : directory.getDirectoriesList()) {
        Digest child = childNode.getDigest();
        if (digests.add(child)) {
          remaining.offer(child);
        }
      }
    }

    @Override
    boolean next() {
      Digest nextDigest = remaining.poll();
      // Stop expanding once the future completes (success or failure) or the queue drains.
      if (!future.isDone() && nextDigest != null) {
        ctx.run(
            () ->
                addCallback(
                    transform(
                        expectDirectory(reason, nextDigest, requestMetadata),
                        directory -> new DirectoryEntry(nextDigest, directory),
                        service),
                    this,
                    service));
        return true;
      }
      return false;
    }
  };
  callback.next();
  return transform(future, (result) -> tree.build(), service);
}
Use of the Digest message (Remote Execution API) in project bazel-buildfarm by bazelbuild.
The class ShardInstance, method validateAndRequeueOperation.
/**
 * Validates a dequeued operation and puts it back on the queue, rebuilding and re-uploading
 * its QueuedOperation blob only when the original fetch failed.
 *
 * <p>Pipeline: fetch the QueuedOperation by digest; on fetch failure, rebuild it from the
 * action digest. Validate it (synchronously, off the operation-transform service). Then fork:
 * if the original fetch succeeded, reuse the existing digest without re-uploading; otherwise
 * upload the rebuilt blob. Finally queue the entry on the backplane. Any failure is converted
 * to an execution status on the operation; the returned future still completes normally so
 * the caller's dequeue proceeds.
 *
 * @param operation the operation being requeued; its name must match the queue entry's
 * @param queueEntry backplane entry holding the execute entry and queued-operation digest
 * @param timeout budget for re-uploading a rebuilt QueuedOperation blob
 * @return future completing when the operation is requeued or its failure has been recorded
 */
private ListenableFuture<Void> validateAndRequeueOperation(
    Operation operation, QueueEntry queueEntry, Duration timeout) {
  ExecuteEntry executeEntry = queueEntry.getExecuteEntry();
  String operationName = executeEntry.getOperationName();
  checkState(operationName.equals(operation.getName()));
  RequestMetadata requestMetadata = executeEntry.getRequestMetadata();
  ListenableFuture<QueuedOperation> fetchQueuedOperationFuture =
      expect(
          queueEntry.getQueuedOperationDigest(),
          QueuedOperation.parser(),
          operationTransformService,
          requestMetadata);
  Digest actionDigest = executeEntry.getActionDigest();
  // Fall back to rebuilding the QueuedOperation from the action if the CAS fetch fails.
  ListenableFuture<QueuedOperation> queuedOperationFuture =
      catchingAsync(
          fetchQueuedOperationFuture,
          Throwable.class,
          (e) ->
              buildQueuedOperation(
                  operation.getName(), actionDigest, operationTransformService, requestMetadata),
          directExecutor());
  PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder();
  ListenableFuture<QueuedOperation> validatedFuture =
      transformAsync(
          queuedOperationFuture,
          (queuedOperation) -> {
            /* sync, throws StatusException - must be serviced via non-OTS */
            validateQueuedOperationAndInputs(
                actionDigest, queuedOperation, preconditionFailure, requestMetadata);
            return immediateFuture(queuedOperation);
          },
          operationTransformService);
  // this little fork ensures that a successfully fetched QueuedOperation
  // will not be reuploaded
  ListenableFuture<QueuedOperationResult> uploadedFuture =
      transformAsync(
          validatedFuture,
          (queuedOperation) ->
              catchingAsync(
                  transform(
                      fetchQueuedOperationFuture,
                      // fixed typo: was "fechedQueuedOperation"; value is intentionally unused —
                      // success of the fetch alone means the existing digest can be reused.
                      (fetchedQueuedOperation) -> {
                        QueuedOperationMetadata metadata =
                            QueuedOperationMetadata.newBuilder()
                                .setExecuteOperationMetadata(
                                    executeOperationMetadata(
                                        executeEntry, ExecutionStage.Value.QUEUED))
                                .setQueuedOperationDigest(queueEntry.getQueuedOperationDigest())
                                .setRequestMetadata(requestMetadata)
                                .build();
                        return new QueuedOperationResult(queueEntry, metadata);
                      },
                      operationTransformService),
                  Throwable.class,
                  (e) ->
                      uploadQueuedOperation(
                          queuedOperation, executeEntry, operationTransformService, timeout),
                  operationTransformService),
          directExecutor());
  SettableFuture<Void> requeuedFuture = SettableFuture.create();
  addCallback(
      uploadedFuture,
      new FutureCallback<QueuedOperationResult>() {
        @Override
        public void onSuccess(QueuedOperationResult result) {
          Operation queueOperation =
              operation.toBuilder().setMetadata(Any.pack(result.metadata)).build();
          try {
            backplane.queue(result.entry, queueOperation);
            requeuedFuture.set(null);
          } catch (IOException e) {
            onFailure(e);
          }
        }

        @Override
        public void onFailure(Throwable t) {
          logger.log(Level.SEVERE, "failed to requeue: " + operationName, t);
          com.google.rpc.Status status = StatusProto.fromThrowable(t);
          if (status == null) {
            logger.log(Level.SEVERE, "no rpc status from exception for " + operationName, t);
            status = asExecutionStatus(t);
          } else if (com.google.rpc.Code.forNumber(status.getCode())
              == com.google.rpc.Code.DEADLINE_EXCEEDED) {
            // A deadline on requeue is not a terminal verdict on the action; soften to
            // UNAVAILABLE so clients may retry.
            logger.log(
                Level.WARNING,
                "an rpc status was thrown with DEADLINE_EXCEEDED for "
                    + operationName
                    + ", discarding it",
                t);
            status =
                com.google.rpc.Status.newBuilder()
                    .setCode(com.google.rpc.Code.UNAVAILABLE.getNumber())
                    .setMessage("SUPPRESSED DEADLINE_EXCEEDED: " + t.getMessage())
                    .build();
          }
          logFailedStatus(actionDigest, status);
          SettableFuture<Void> errorFuture = SettableFuture.create();
          errorOperationFuture(operation, requestMetadata, status, errorFuture);
          // Complete normally even on error so the caller's requeue loop continues.
          errorFuture.addListener(() -> requeuedFuture.set(null), operationTransformService);
        }
      },
      operationTransformService);
  return requeuedFuture;
}
Aggregations