Use of build.bazel.remote.execution.v2.Digest in project tools_remote by bazelbuild.
The class RemoteClient, method listDirectory.
// Recursively list directory files/subdirectories with digests. Returns the number of files
// listed.
private int listDirectory(Path path, Directory dir, Map<Digest, Directory> childrenMap, int limit)
    throws IOException {
  // Try to list the files in this directory before listing the directories.
  int numFilesListed = listFileNodes(path, dir, limit);
  if (numFilesListed >= limit) {
    return numFilesListed;
  }
  for (DirectoryNode child : dir.getDirectoriesList()) {
    Path childPath = path.resolve(child.getName());
    printDirectoryNodeDetails(child, childPath);
    Digest childDigest = child.getDigest();
    Directory childDir = childrenMap.get(childDigest);
    numFilesListed += listDirectory(childPath, childDir, childrenMap, limit - numFilesListed);
    if (numFilesListed >= limit) {
      return numFilesListed;
    }
  }
  return numFilesListed;
}
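For context, a minimal sketch of how a caller might drive listDirectory. The childrenMap is built by hashing every Directory in a Tree fetched from the content-addressable store; the getTree helper and digestUtil field below mirror the downloadDirectory snippet further down and are assumptions about the surrounding class, not quoted source.

// Hypothetical driver for listDirectory, assuming a getTree(Digest) helper and a
// DigestUtil field like the ones used in AbstractRemoteActionCache below.
private void listTree(Path rootPath, Digest rootDigest, int limit) throws IOException {
  Tree tree = getTree(rootDigest);
  Map<Digest, Directory> childrenMap = new HashMap<>();
  for (Directory child : tree.getChildrenList()) {
    // Key each child Directory by its digest so DirectoryNode references resolve.
    childrenMap.put(digestUtil.compute(child), child);
  }
  listDirectory(rootPath, tree.getRoot(), childrenMap, limit);
}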
Use of build.bazel.remote.execution.v2.Digest in project tools_remote by bazelbuild.
The class AbstractRemoteActionCache, method downloadDirectory.
/**
 * Download the full contents of a Directory to a local path given its digest.
 *
 * @param downloadPath The path to download the directory contents to.
 * @param directoryDigest The digest of the Directory to download.
 * @throws IOException if the Tree or any of its blobs cannot be fetched or written locally.
 */
public void downloadDirectory(Path downloadPath, Digest directoryDigest) throws IOException {
  Tree tree = getTree(directoryDigest);
  Map<Digest, Directory> childrenMap = new HashMap<>();
  for (Directory child : tree.getChildrenList()) {
    childrenMap.put(digestUtil.compute(child), child);
  }
  downloadDirectory(downloadPath, tree.getRoot(), childrenMap);
}
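The public method delegates to a private recursive overload that is not shown here. A minimal sketch of what that overload might look like, assuming a hypothetical downloadFile helper and java.nio.file.Files for directory creation; the actual tools_remote implementation may differ:

// Hypothetical recursive overload: materialize this directory's files, then
// recurse into subdirectories resolved through childrenMap.
private void downloadDirectory(Path path, Directory dir, Map<Digest, Directory> childrenMap)
    throws IOException {
  Files.createDirectories(path);
  for (FileNode file : dir.getFilesList()) {
    // downloadFile is an assumed helper that fetches a blob by digest to a local path.
    downloadFile(path.resolve(file.getName()), file.getDigest(), file.getIsExecutable());
  }
  for (DirectoryNode child : dir.getDirectoriesList()) {
    Directory childDir = childrenMap.get(child.getDigest());
    if (childDir == null) {
      throw new IOException("Directory " + child.getName() + " is missing from the Tree");
    }
    downloadDirectory(path.resolve(child.getName()), childDir, childrenMap);
  }
}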
Use of build.bazel.remote.execution.v2.Digest in project tools_remote by bazelbuild.
The class ActionGrouping, method failedActions.
List<Digest> failedActions() throws IOException {
  if (V1found) {
    System.err.println(
        "This functionality is not supported for the V1 API. Please upgrade your Bazel version.");
    System.exit(1);
  }
  ArrayList<Digest> result = new ArrayList<>();
  for (String hash : actionMap.keySet()) {
    ActionDetails a = actionMap.get(hash);
    if (a.isFailed()) {
      Digest digest = a.getDigest();
      if (digest == null) {
        System.err.println("Error: missing digest for failed action " + hash);
      } else {
        result.add(digest);
      }
    }
  }
  return result;
}
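A short usage sketch: the returned digests identify failed actions in the CAS, so a caller might print them in the conventional hash/size form for follow-up fetching. Digest exposes getHash() and getSizeBytes() in the Remote Execution API protos; the printFailedActions wrapper below is hypothetical.

// Hypothetical caller that prints failed action digests for later inspection.
void printFailedActions(ActionGrouping grouping) throws IOException {
  for (Digest digest : grouping.failedActions()) {
    // hash/size_bytes is the usual command-line spelling of a digest.
    System.out.println(digest.getHash() + "/" + digest.getSizeBytes());
  }
}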
Use of build.bazel.remote.execution.v2.Digest in project bazel-buildfarm by bazelbuild.
The class ShardInstance, method findMissingBlobsOnWorker.
private void findMissingBlobsOnWorker(
    String requestId,
    Iterable<Digest> blobDigests,
    Deque<String> workers,
    ImmutableList.Builder<FindMissingResponseEntry> responses,
    int originalSize,
    Executor executor,
    SettableFuture<Iterable<Digest>> missingDigestsFuture,
    RequestMetadata requestMetadata) {
  String worker = workers.removeFirst();
  ListenableFuture<Iterable<Digest>> workerMissingBlobsFuture =
      workerStub(worker).findMissingBlobs(blobDigests, requestMetadata);
  Stopwatch stopwatch = Stopwatch.createStarted();
  addCallback(workerMissingBlobsFuture, new FutureCallback<Iterable<Digest>>() {
    @Override
    public void onSuccess(Iterable<Digest> missingDigests) {
      if (Iterables.isEmpty(missingDigests) || workers.isEmpty()) {
        missingDigestsFuture.set(missingDigests);
      } else {
        responses.add(new FindMissingResponseEntry(
            worker, stopwatch.elapsed(MICROSECONDS), null, Iterables.size(missingDigests)));
        findMissingBlobsOnWorker(requestId, missingDigests, workers, responses,
            originalSize, executor, missingDigestsFuture, requestMetadata);
      }
    }

    @Override
    public void onFailure(Throwable t) {
      responses.add(new FindMissingResponseEntry(
          worker, stopwatch.elapsed(MICROSECONDS), t, Iterables.size(blobDigests)));
      Status status = Status.fromThrowable(t);
      if (status.getCode() == Code.UNAVAILABLE || status.getCode() == Code.UNIMPLEMENTED) {
        removeMalfunctioningWorker(worker, t, "findMissingBlobs(" + requestId + ")");
      } else if (status.getCode() == Code.DEADLINE_EXCEEDED) {
        for (FindMissingResponseEntry response : responses.build()) {
          logger.log(
              response.exception == null ? Level.WARNING : Level.SEVERE,
              format(
                  "DEADLINE_EXCEEDED: findMissingBlobs(%s) %s: %d remaining of %d %dus%s",
                  requestId, response.worker, response.stillMissingAfter, originalSize,
                  response.elapsedMicros,
                  response.exception != null ? ": " + response.exception.toString() : ""));
        }
        missingDigestsFuture.setException(status.asException());
      } else if (status.getCode() == Code.CANCELLED
          || Context.current().isCancelled()
          || !SHARD_IS_RETRIABLE.test(status)) {
        // cancelled or non-retriable: propagate the failure and stop
        missingDigestsFuture.setException(status.asException());
      } else {
        // retriable failure: move the worker to the back of the queue for another attempt
        workers.addLast(worker);
      }
      if (!missingDigestsFuture.isDone()) {
        if (workers.isEmpty()) {
          missingDigestsFuture.set(blobDigests);
        } else {
          findMissingBlobsOnWorker(requestId, blobDigests, workers, responses,
              originalSize, executor, missingDigestsFuture, requestMetadata);
        }
      }
    }
  }, executor);
}
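A hedged sketch of how this cascade might be started: seed a SettableFuture and a worker deque, fire the first probe, and hand the future back to the caller. Only the recursive helper above is quoted from the source; the entry point below, including the getWorkerSet() accessor, is an assumption for illustration.

// Hypothetical entry point, assuming the shard's current worker set is available
// via an assumed getWorkerSet() accessor.
private ListenableFuture<Iterable<Digest>> findMissingBlobs(
    String requestId, Iterable<Digest> blobDigests, RequestMetadata requestMetadata) {
  Deque<String> workers = new ArrayDeque<>(getWorkerSet());
  SettableFuture<Iterable<Digest>> missingDigestsFuture = SettableFuture.create();
  ImmutableList.Builder<FindMissingResponseEntry> responses = ImmutableList.builder();
  findMissingBlobsOnWorker(
      requestId, blobDigests, workers, responses,
      Iterables.size(blobDigests), operationTransformService,
      missingDigestsFuture, requestMetadata);
  return missingDigestsFuture;
}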
Use of build.bazel.remote.execution.v2.Digest in project bazel-buildfarm by bazelbuild.
The class ShardInstance, method transformAndQueue.
private ListenableFuture<Void> transformAndQueue(
    ExecuteEntry executeEntry,
    Poller poller,
    Operation operation,
    Stopwatch stopwatch,
    Duration timeout) {
  long checkCacheUSecs = stopwatch.elapsed(MICROSECONDS);
  ExecuteOperationMetadata metadata;
  try {
    metadata = operation.getMetadata().unpack(ExecuteOperationMetadata.class);
  } catch (InvalidProtocolBufferException e) {
    return immediateFailedFuture(e);
  }
  Digest actionDigest = metadata.getActionDigest();
  SettableFuture<Void> queueFuture = SettableFuture.create();
  logger.log(Level.FINE, format(
      "ShardInstance(%s): queue(%s): fetching action %s",
      getName(), operation.getName(), actionDigest.getHash()));
  RequestMetadata requestMetadata = executeEntry.getRequestMetadata();
  ListenableFuture<Action> actionFuture = catchingAsync(
      transformAsync(
          expectAction(actionDigest, requestMetadata),
          (action) -> {
            if (action == null) {
              throw Status.NOT_FOUND.asException();
            } else if (action.getDoNotCache()) {
              // invalidate our action cache result as well as watcher owner
              readThroughActionCache.invalidate(DigestUtil.asActionKey(actionDigest));
              backplane.putOperation(
                  operation.toBuilder().setMetadata(Any.pack(action)).build(),
                  metadata.getStage());
            }
            return immediateFuture(action);
          },
          operationTransformService),
      StatusException.class,
      (e) -> {
        Status st = Status.fromThrowable(e);
        if (st.getCode() == Code.NOT_FOUND) {
          PreconditionFailure.Builder preconditionFailure = PreconditionFailure.newBuilder();
          preconditionFailure.addViolationsBuilder()
              .setType(VIOLATION_TYPE_MISSING)
              .setSubject("blobs/" + DigestUtil.toString(actionDigest))
              .setDescription(MISSING_ACTION);
          checkPreconditionFailure(actionDigest, preconditionFailure.build());
        }
        throw st.asRuntimeException();
      },
      operationTransformService);
  QueuedOperation.Builder queuedOperationBuilder = QueuedOperation.newBuilder();
  ListenableFuture<ProfiledQueuedOperationMetadata.Builder> queuedFuture = transformAsync(
      actionFuture,
      (action) -> {
        logger.log(Level.FINE, format(
            "ShardInstance(%s): queue(%s): fetched action %s transforming queuedOperation",
            getName(), operation.getName(), actionDigest.getHash()));
        Stopwatch transformStopwatch = Stopwatch.createStarted();
        return transform(
            transformQueuedOperation(
                operation.getName(), action, action.getCommandDigest(),
                action.getInputRootDigest(), queuedOperationBuilder,
                operationTransformService, requestMetadata),
            (queuedOperation) -> ProfiledQueuedOperationMetadata.newBuilder()
                .setQueuedOperation(queuedOperation)
                .setQueuedOperationMetadata(
                    buildQueuedOperationMetadata(metadata, requestMetadata, queuedOperation))
                .setTransformedIn(
                    Durations.fromMicros(transformStopwatch.elapsed(MICROSECONDS))),
            operationTransformService);
      },
      operationTransformService);
  ListenableFuture<ProfiledQueuedOperationMetadata.Builder> validatedFuture = transformAsync(
      queuedFuture,
      (profiledQueuedMetadata) -> {
        logger.log(Level.FINE, format(
            "ShardInstance(%s): queue(%s): queuedOperation %s transformed, validating",
            getName(), operation.getName(),
            DigestUtil.toString(
                profiledQueuedMetadata.getQueuedOperationMetadata().getQueuedOperationDigest())));
        long startValidateUSecs = stopwatch.elapsed(MICROSECONDS);
        /* sync, throws StatusException */
        validateQueuedOperation(actionDigest, profiledQueuedMetadata.getQueuedOperation());
        return immediateFuture(profiledQueuedMetadata.setValidatedIn(
            Durations.fromMicros(stopwatch.elapsed(MICROSECONDS) - startValidateUSecs)));
      },
      operationTransformService);
  ListenableFuture<ProfiledQueuedOperationMetadata> queuedOperationCommittedFuture =
      transformAsync(
          validatedFuture,
          (profiledQueuedMetadata) -> {
            logger.log(Level.FINE, format(
                "ShardInstance(%s): queue(%s): queuedOperation %s validated, uploading",
                getName(), operation.getName(),
                DigestUtil.toString(
                    profiledQueuedMetadata.getQueuedOperationMetadata()
                        .getQueuedOperationDigest())));
            ByteString queuedOperationBlob =
                profiledQueuedMetadata.getQueuedOperation().toByteString();
            Digest queuedOperationDigest =
                profiledQueuedMetadata.getQueuedOperationMetadata().getQueuedOperationDigest();
            long startUploadUSecs = stopwatch.elapsed(MICROSECONDS);
            return transform(
                writeBlobFuture(
                    queuedOperationDigest, queuedOperationBlob, requestMetadata, timeout),
                (committedSize) -> profiledQueuedMetadata
                    .setUploadedIn(
                        Durations.fromMicros(stopwatch.elapsed(MICROSECONDS) - startUploadUSecs))
                    .build(),
                operationTransformService);
          },
          operationTransformService);
  // onQueue call?
  addCallback(
      queuedOperationCommittedFuture,
      new FutureCallback<ProfiledQueuedOperationMetadata>() {
        @Override
        public void onSuccess(ProfiledQueuedOperationMetadata profiledQueuedMetadata) {
          QueuedOperationMetadata queuedOperationMetadata =
              profiledQueuedMetadata.getQueuedOperationMetadata();
          Operation queueOperation =
              operation.toBuilder().setMetadata(Any.pack(queuedOperationMetadata)).build();
          QueueEntry queueEntry = QueueEntry.newBuilder()
              .setExecuteEntry(executeEntry)
              .setQueuedOperationDigest(queuedOperationMetadata.getQueuedOperationDigest())
              .setPlatform(
                  profiledQueuedMetadata.getQueuedOperation().getCommand().getPlatform())
              .build();
          try {
            ensureCanQueue(stopwatch);
            long startQueueUSecs = stopwatch.elapsed(MICROSECONDS);
            poller.pause();
            backplane.queue(queueEntry, queueOperation);
            long elapsedUSecs = stopwatch.elapsed(MICROSECONDS);
            long queueUSecs = elapsedUSecs - startQueueUSecs;
            logger.log(Level.FINE, format(
                "ShardInstance(%s): queue(%s): %dus checkCache, %dus transform, "
                    + "%dus validate, %dus upload, %dus queue, %dus elapsed",
                getName(), queueOperation.getName(), checkCacheUSecs,
                Durations.toMicros(profiledQueuedMetadata.getTransformedIn()),
                Durations.toMicros(profiledQueuedMetadata.getValidatedIn()),
                Durations.toMicros(profiledQueuedMetadata.getUploadedIn()),
                queueUSecs, elapsedUSecs));
            queueFuture.set(null);
          } catch (IOException e) {
            onFailure(e.getCause() == null ? e : e.getCause());
          } catch (InterruptedException e) {
            // ignore
          }
        }

        @Override
        public void onFailure(Throwable t) {
          poller.pause();
          com.google.rpc.Status status = StatusProto.fromThrowable(t);
          if (status == null) {
            logger.log(
                Level.SEVERE, "no rpc status from exception for " + operation.getName(), t);
            status = asExecutionStatus(t);
          } else if (com.google.rpc.Code.forNumber(status.getCode())
              == com.google.rpc.Code.DEADLINE_EXCEEDED) {
            logger.log(Level.WARNING,
                "an rpc status was thrown with DEADLINE_EXCEEDED for " + operation.getName()
                    + ", discarding it", t);
            status = com.google.rpc.Status.newBuilder()
                .setCode(com.google.rpc.Code.UNAVAILABLE.getNumber())
                .setMessage("SUPPRESSED DEADLINE_EXCEEDED: " + t.getMessage())
                .build();
          }
          logFailedStatus(actionDigest, status);
          errorOperationFuture(operation, requestMetadata, status, queueFuture);
        }
      },
      operationTransformService);
  return queueFuture;
}
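A minimal sketch of consuming the returned future, assuming Guava's Futures.addCallback and MoreExecutors.directExecutor(); the executeEntry, poller, operation, and timeout variables are assumed to be in scope, and the real call sites in buildfarm may differ.

// Hypothetical call site: queue the operation and log the outcome.
ListenableFuture<Void> queued =
    transformAndQueue(executeEntry, poller, operation, Stopwatch.createStarted(), timeout);
Futures.addCallback(
    queued,
    new FutureCallback<Void>() {
      @Override
      public void onSuccess(Void result) {
        logger.log(Level.FINE, "operation " + operation.getName() + " queued");
      }

      @Override
      public void onFailure(Throwable t) {
        logger.log(Level.SEVERE, "failed to queue " + operation.getName(), t);
      }
    },
    MoreExecutors.directExecutor());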