Use of build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response in project tools_remote by bazelbuild.
Class ActionGroupingTest, method makeExecuteWithStatus:
private RpcCallDetails makeExecuteWithStatus(int status) {
  ExecuteResponse.Builder response = ExecuteResponse.newBuilder();
  response.getStatusBuilder().setCode(status);
  Operation operation =
      Operation.newBuilder().setResponse(Any.pack(response.build())).setDone(true).build();
  ExecuteDetails execute = ExecuteDetails.newBuilder().addResponses(operation).build();
  return RpcCallDetails.newBuilder().setExecute(execute).build();
}
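The pack is symmetric, so a test can recover the status from the built details. A minimal sketch (statusCodeOf is a hypothetical helper, not part of ActionGroupingTest; it assumes the same remote-execution and longrunning protos are on the classpath):

static int statusCodeOf(Operation operation) throws InvalidProtocolBufferException {
  // Operation.response holds an Any; unpack it back into the ExecuteResponse
  // that makeExecuteWithStatus packed, then read the google.rpc.Status code.
  ExecuteResponse response = operation.getResponse().unpack(ExecuteResponse.class);
  return response.getStatus().getCode();
}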
Use of build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response in project tools_remote by bazelbuild.
Class ActionGroupingTest, method makeWatch:
private RpcCallDetails makeWatch(ActionResult result) {
  ExecuteResponse response = ExecuteResponse.newBuilder().setResult(result).build();
  Operation operation =
      Operation.newBuilder().setResponse(Any.pack(response)).setDone(true).build();
  WaitExecutionDetails waitExecution =
      WaitExecutionDetails.newBuilder().addResponses(operation).build();
  return RpcCallDetails.newBuilder().setWaitExecution(waitExecution).build();
}
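On the read side, the same Any round-trip yields the ActionResult back out of a done Operation. A hedged sketch (resultOf is a hypothetical helper, assuming an Operation produced by makeWatch):

static ActionResult resultOf(Operation operation) throws InvalidProtocolBufferException {
  // Unpack the ExecuteResponse that makeWatch packed and return its ActionResult.
  return operation.getResponse().unpack(ExecuteResponse.class).getResult();
}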
Use of build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response in project bazel-buildfarm by bazelbuild.
Class ShardInstance, method findMissingBlobsOnWorker:
private void findMissingBlobsOnWorker(
    String requestId,
    Iterable<Digest> blobDigests,
    Deque<String> workers,
    ImmutableList.Builder<FindMissingResponseEntry> responses,
    int originalSize,
    Executor executor,
    SettableFuture<Iterable<Digest>> missingDigestsFuture,
    RequestMetadata requestMetadata) {
  String worker = workers.removeFirst();
  ListenableFuture<Iterable<Digest>> workerMissingBlobsFuture =
      workerStub(worker).findMissingBlobs(blobDigests, requestMetadata);
  Stopwatch stopwatch = Stopwatch.createStarted();
  addCallback(
      workerMissingBlobsFuture,
      new FutureCallback<Iterable<Digest>>() {
        @Override
        public void onSuccess(Iterable<Digest> missingDigests) {
          if (Iterables.isEmpty(missingDigests) || workers.isEmpty()) {
            missingDigestsFuture.set(missingDigests);
          } else {
            responses.add(
                new FindMissingResponseEntry(
                    worker,
                    stopwatch.elapsed(MICROSECONDS),
                    null,
                    Iterables.size(missingDigests)));
            findMissingBlobsOnWorker(
                requestId,
                missingDigests,
                workers,
                responses,
                originalSize,
                executor,
                missingDigestsFuture,
                requestMetadata);
          }
        }

        @Override
        public void onFailure(Throwable t) {
          responses.add(
              new FindMissingResponseEntry(
                  worker, stopwatch.elapsed(MICROSECONDS), t, Iterables.size(blobDigests)));
          Status status = Status.fromThrowable(t);
          if (status.getCode() == Code.UNAVAILABLE || status.getCode() == Code.UNIMPLEMENTED) {
            removeMalfunctioningWorker(worker, t, "findMissingBlobs(" + requestId + ")");
          } else if (status.getCode() == Code.DEADLINE_EXCEEDED) {
            for (FindMissingResponseEntry response : responses.build()) {
              logger.log(
                  response.exception == null ? Level.WARNING : Level.SEVERE,
                  format(
                      "DEADLINE_EXCEEDED: findMissingBlobs(%s) %s: %d remaining of %d %dus%s",
                      requestId,
                      response.worker,
                      response.stillMissingAfter,
                      originalSize,
                      response.elapsedMicros,
                      response.exception != null ? ": " + response.exception.toString() : ""));
            }
            missingDigestsFuture.setException(status.asException());
          } else if (status.getCode() == Code.CANCELLED
              || Context.current().isCancelled()
              || !SHARD_IS_RETRIABLE.test(status)) {
            // do nothing further if we're cancelled
            missingDigestsFuture.setException(status.asException());
          } else {
            // why not, always
            workers.addLast(worker);
          }
          if (!missingDigestsFuture.isDone()) {
            if (workers.isEmpty()) {
              missingDigestsFuture.set(blobDigests);
            } else {
              findMissingBlobsOnWorker(
                  requestId,
                  blobDigests,
                  workers,
                  responses,
                  originalSize,
                  executor,
                  missingDigestsFuture,
                  requestMetadata);
            }
          }
        }
      },
      executor);
}
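A caller seeds the worker deque and blocks on the future, which resolves with the digests no queried worker had or fails on a fatal status. A minimal sketch under assumptions: it runs inside ShardInstance (the method is private), the worker names and request id are placeholders, and directExecutor comes from Guava's MoreExecutors:

Deque<String> workers = new ArrayDeque<>(ImmutableList.of("worker-1", "worker-2")); // hypothetical workers
SettableFuture<Iterable<Digest>> future = SettableFuture.create();
findMissingBlobsOnWorker(
    "request-1", // hypothetical request id
    blobDigests,
    workers,
    ImmutableList.builder(),
    Iterables.size(blobDigests),
    MoreExecutors.directExecutor(),
    future,
    RequestMetadata.getDefaultInstance());
// Blocks until some worker answers with its missing set or the deque drains.
Iterable<Digest> stillMissing = future.get();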
Use of build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response in project bazel-buildfarm by bazelbuild.
Class StubInstance, method getTree:
@Override
public String getTree(Digest rootDigest, int pageSize, String pageToken, Tree.Builder tree) {
  tree.setRootDigest(rootDigest);
  throwIfStopped();
  Iterator<GetTreeResponse> replies =
      deadlined(casBlockingStub)
          .getTree(
              GetTreeRequest.newBuilder()
                  .setInstanceName(getName())
                  .setRootDigest(rootDigest)
                  .setPageSize(pageSize)
                  .setPageToken(pageToken)
                  .build());
  // new streaming interface doesn't really fit with what we're trying to do here...
  String nextPageToken = "";
  while (replies.hasNext()) {
    GetTreeResponse response = replies.next();
    for (Directory directory : response.getDirectoriesList()) {
      tree.putDirectories(digestUtil.compute(directory).getHash(), directory);
    }
    nextPageToken = response.getNextPageToken();
  }
  return nextPageToken;
}
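Since the method drains the whole stream, the returned token is normally empty after one call, but a paging loop keeps the page-token contract honest if a server ever returns a partial tree. A hedged sketch (instance and rootDigest are assumed to be in scope):

Tree.Builder tree = Tree.newBuilder();
String pageToken = "";
do {
  // A page size of 0 lets the server choose its own page size, per the REAPI GetTreeRequest docs.
  pageToken = instance.getTree(rootDigest, /* pageSize= */ 0, pageToken, tree);
} while (!pageToken.isEmpty());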
Use of build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response in project bazel-buildfarm by bazelbuild.
Class Executor, method loadFilesIntoCAS:
private static void loadFilesIntoCAS(String instanceName, Channel channel, Path blobsDir)
    throws Exception {
  ContentAddressableStorageBlockingStub casStub =
      ContentAddressableStorageGrpc.newBlockingStub(channel);
  List<Digest> missingDigests = findMissingBlobs(instanceName, blobsDir, casStub);
  UUID uploadId = UUID.randomUUID();
  int[] bucketSizes = new int[128];
  BatchUpdateBlobsRequest.Builder[] buckets = new BatchUpdateBlobsRequest.Builder[128];
  for (int i = 0; i < 128; i++) {
    bucketSizes[i] = 0;
    buckets[i] = BatchUpdateBlobsRequest.newBuilder().setInstanceName(instanceName);
  }
  ByteStreamStub bsStub = ByteStreamGrpc.newStub(channel);
  for (Digest missingDigest : missingDigests) {
    Path path = blobsDir.resolve(missingDigest.getHash() + "_" + missingDigest.getSizeBytes());
    if (missingDigest.getSizeBytes() < Size.mbToBytes(1)) {
      Request request =
          Request.newBuilder()
              .setDigest(missingDigest)
              .setData(ByteString.copyFrom(Files.readAllBytes(path)))
              .build();
      int maxBucketSize = 0;
      long minBucketSize = Size.mbToBytes(2) + 1;
      int maxBucketIndex = 0;
      int minBucketIndex = -1;
      int size = (int) missingDigest.getSizeBytes() + 48;
      for (int i = 0; i < 128; i++) {
        int newBucketSize = bucketSizes[i] + size;
        if (newBucketSize < Size.mbToBytes(2) && bucketSizes[i] < minBucketSize) {
          minBucketSize = bucketSizes[i];
          minBucketIndex = i;
        }
        if (bucketSizes[i] > maxBucketSize) {
          maxBucketSize = bucketSizes[i];
          maxBucketIndex = i;
        }
      }
      if (minBucketIndex < 0) {
        bucketSizes[maxBucketIndex] = size;
        BatchUpdateBlobsRequest batchRequest = buckets[maxBucketIndex].build();
        Stopwatch stopwatch = Stopwatch.createStarted();
        BatchUpdateBlobsResponse batchResponse = casStub.batchUpdateBlobs(batchRequest);
        long usecs = stopwatch.elapsed(MICROSECONDS);
        checkState(
            batchResponse.getResponsesList().stream()
                .allMatch(response -> Code.forNumber(response.getStatus().getCode()) == Code.OK));
        System.out.println(
            "Updated " + batchRequest.getRequestsCount() + " blobs in " + (usecs / 1000.0) + "ms");
        buckets[maxBucketIndex] =
            BatchUpdateBlobsRequest.newBuilder()
                .setInstanceName(instanceName)
                .addRequests(request);
      } else {
        bucketSizes[minBucketIndex] += size;
        buckets[minBucketIndex].addRequests(request);
      }
    } else {
      Stopwatch stopwatch = Stopwatch.createStarted();
      SettableFuture<WriteResponse> writtenFuture = SettableFuture.create();
      StreamObserver<WriteRequest> requestObserver =
          bsStub.write(
              new StreamObserver<WriteResponse>() {
                @Override
                public void onNext(WriteResponse response) {
                  writtenFuture.set(response);
                }

                @Override
                public void onCompleted() {}

                @Override
                public void onError(Throwable t) {
                  writtenFuture.setException(t);
                }
              });
      HashCode hash = HashCode.fromString(missingDigest.getHash());
      String resourceName =
          uploadResourceName(instanceName, uploadId, hash, missingDigest.getSizeBytes());
      try (InputStream in = Files.newInputStream(path)) {
        boolean first = true;
        long writtenBytes = 0;
        byte[] buf = new byte[64 * 1024];
        while (writtenBytes != missingDigest.getSizeBytes()) {
          int len = in.read(buf);
          WriteRequest.Builder request = WriteRequest.newBuilder();
          if (first) {
            request.setResourceName(resourceName);
          }
          request.setData(ByteString.copyFrom(buf, 0, len)).setWriteOffset(writtenBytes);
          if (writtenBytes + len == missingDigest.getSizeBytes()) {
            request.setFinishWrite(true);
          }
          requestObserver.onNext(request.build());
          writtenBytes += len;
          first = false;
        }
        writtenFuture.get();
        System.out.println(
            "Wrote long "
                + DigestUtil.toString(missingDigest)
                + " in "
                + (stopwatch.elapsed(MICROSECONDS) / 1000.0)
                + "ms");
      }
    }
  }
  for (int i = 0; i < 128; i++) {
    if (bucketSizes[i] > 0) {
      BatchUpdateBlobsRequest batchRequest = buckets[i].build();
      Stopwatch stopwatch = Stopwatch.createStarted();
      BatchUpdateBlobsResponse batchResponse = casStub.batchUpdateBlobs(batchRequest);
      long usecs = stopwatch.elapsed(MICROSECONDS);
      checkState(
          batchResponse.getResponsesList().stream()
              .allMatch(response -> Code.forNumber(response.getStatus().getCode()) == Code.OK));
      System.out.println(
          "Updated " + batchRequest.getRequestsCount() + " blobs in " + (usecs / 1000.0) + "ms");
    }
  }
}
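The checkState calls above only verify the aggregate; each BatchUpdateBlobsResponse.Response also carries the digest and status for its own blob, which is useful when only some uploads fail. A hedged sketch of per-blob error reporting (assumes a batchResponse like the ones above and the same com.google.rpc.Code usage):

for (BatchUpdateBlobsResponse.Response response : batchResponse.getResponsesList()) {
  if (Code.forNumber(response.getStatus().getCode()) != Code.OK) {
    // Report which blob failed and why, instead of failing the whole batch opaquely.
    System.err.println(
        "update failed for " + DigestUtil.toString(response.getDigest())
            + ": " + response.getStatus().getMessage());
  }
}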