Use of build.buildfarm.common.EntryLimitException in project bazel-buildfarm by bazelbuild.
From class ShardWorkerContext, method uploadOutputFile:
private void uploadOutputFile(
    ActionResult.Builder resultBuilder,
    String outputFile,
    Path actionRoot,
    PreconditionFailure.Builder preconditionFailure)
    throws IOException, InterruptedException {
  Path outputPath = actionRoot.resolve(outputFile);
  if (!Files.exists(outputPath)) {
    logger.log(Level.FINE, "ReportResultStage: " + outputFile + " does not exist...");
    return;
  }
  if (Files.isDirectory(outputPath)) {
    logger.log(Level.FINE, "ReportResultStage: " + outputFile + " is a directory");
    preconditionFailure.addViolationsBuilder()
        .setType(VIOLATION_TYPE_INVALID)
        .setSubject(outputFile)
        .setDescription("An output file was a directory");
    return;
  }

  long size = Files.size(outputPath);
  long maxEntrySize = execFileSystem.getStorage().maxEntrySize();
  if (maxEntrySize != UNLIMITED_ENTRY_SIZE_MAX && size > maxEntrySize) {
    preconditionFailure.addViolationsBuilder()
        .setType(VIOLATION_TYPE_MISSING)
        .setSubject(outputFile + ": " + size)
        .setDescription(
            "An output could not be uploaded because it exceeded the maximum size of an entry");
    return;
  }

  // will run into issues if we end up blocking on the cache insertion; might
  // want to decrement input references *before* this to ensure that we cannot
  // cause an internal deadlock
  Digest digest;
  try {
    digest = getDigestUtil().compute(outputPath);
  } catch (NoSuchFileException e) {
    return;
  }

  resultBuilder.addOutputFilesBuilder()
      .setPath(outputFile)
      .setDigest(digest)
      .setIsExecutable(Files.isExecutable(outputPath));

  try {
    insertFile(digest, outputPath);
  } catch (EntryLimitException e) {
    preconditionFailure.addViolationsBuilder()
        .setType(VIOLATION_TYPE_MISSING)
        .setSubject("blobs/" + DigestUtil.toString(digest))
        .setDescription(
            "An output could not be uploaded because it exceeded the maximum size of an entry");
  }
}
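Both failure paths above surface the size limit through a PreconditionFailure rather than an exception to the caller. As a standalone illustration (not buildfarm code; the subject string uses a hypothetical digest), the violation that ultimately reaches the client has this shape, using the com.google.rpc protos:

import com.google.rpc.PreconditionFailure;

class EntryLimitViolationSketch {
  // Sketch only: the wire shape of the violation uploadOutputFile reports.
  // Blobs the CAS refuses are reported as "MISSING" violations
  // (buildfarm's VIOLATION_TYPE_MISSING constant).
  public static void main(String[] args) {
    PreconditionFailure failure =
        PreconditionFailure.newBuilder()
            .addViolations(
                PreconditionFailure.Violation.newBuilder()
                    .setType("MISSING")
                    .setSubject("blobs/0000.../9000000000") // hypothetical digest and size
                    .setDescription(
                        "An output could not be uploaded because it exceeded"
                            + " the maximum size of an entry"))
            .build();
    System.out.println(failure);
  }
}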
Use of build.buildfarm.common.EntryLimitException in project bazel-buildfarm by bazelbuild.
From class ByteStreamService, method queryWriteStatus:
@Override
public void queryWriteStatus(
    QueryWriteStatusRequest request,
    StreamObserver<QueryWriteStatusResponse> responseObserver) {
  String resourceName = request.getResourceName();
  try {
    logger.log(Level.FINE, format("queryWriteStatus(%s)", resourceName));
    Write write = getWrite(resourceName);
    responseObserver.onNext(
        QueryWriteStatusResponse.newBuilder()
            .setCommittedSize(write.getCommittedSize())
            .setComplete(write.isComplete())
            .build());
    responseObserver.onCompleted();
    logger.log(
        Level.FINE,
        format(
            "queryWriteStatus(%s) => committed_size = %d, complete = %s",
            resourceName, write.getCommittedSize(), write.isComplete()));
  } catch (IllegalArgumentException | InvalidResourceNameException e) {
    logger.log(Level.SEVERE, format("queryWriteStatus(%s)", resourceName), e);
    responseObserver.onError(INVALID_ARGUMENT.withDescription(e.getMessage()).asException());
  } catch (EntryLimitException e) {
    logger.log(Level.WARNING, format("queryWriteStatus(%s): %s", resourceName, e.getMessage()));
    responseObserver.onNext(QueryWriteStatusResponse.getDefaultInstance());
    responseObserver.onCompleted();
  } catch (RuntimeException e) {
    logger.log(Level.SEVERE, format("queryWriteStatus(%s)", resourceName), e);
    responseObserver.onError(Status.fromThrowable(e).asException());
  }
}
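Note the asymmetry in the handlers: an EntryLimitException is logged at WARNING and answered with the default QueryWriteStatusResponse (committed_size = 0, complete = false), which tells the client to restart the upload rather than fail. A minimal client-side probe might look like the following sketch; the target address and resource name are placeholders, and the REAPI upload name format {instance}/uploads/{uuid}/blobs/{hash}/{size} is assumed:

import com.google.bytestream.ByteStreamGrpc;
import com.google.bytestream.ByteStreamProto.QueryWriteStatusRequest;
import com.google.bytestream.ByteStreamProto.QueryWriteStatusResponse;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

class QueryWriteStatusSketch {
  public static void main(String[] args) {
    // Placeholder endpoint; a real deployment would use its own address.
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("localhost:8980").usePlaintext().build();
    QueryWriteStatusResponse response =
        ByteStreamGrpc.newBlockingStub(channel)
            .queryWriteStatus(
                QueryWriteStatusRequest.newBuilder()
                    .setResourceName("main/uploads/1234/blobs/abc.../42") // placeholder
                    .build());
    // A blob over the entry limit yields the default response:
    // committed_size = 0, complete = false, so the client starts over.
    System.out.printf(
        "committed_size = %d, complete = %s%n",
        response.getCommittedSize(), response.getComplete());
    channel.shutdownNow();
  }
}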
Use of build.buildfarm.common.EntryLimitException in project bazel-buildfarm by bazelbuild.
From class CASFileCache, method putOrReferenceGuarded:
private CancellableOutputStream putOrReferenceGuarded(
    String key,
    UUID writeId,
    Supplier<Boolean> writeWinner,
    long blobSizeInBytes,
    boolean isExecutable,
    Runnable onInsert,
    AtomicBoolean requiresDischarge,
    boolean isReset)
    throws IOException, InterruptedException {
  if (blobSizeInBytes > maxEntrySizeInBytes) {
    throw new EntryLimitException(blobSizeInBytes, maxEntrySizeInBytes);
  }
  if (!charge(key, blobSizeInBytes, requiresDischarge)) {
    return DUPLICATE_OUTPUT_STREAM;
  }

  String writeKey = key + "." + writeId;
  Path writePath = getPath(key).resolveSibling(writeKey);
  final long committedSize;
  final HashingOutputStream hashOut;
  if (!isReset && Files.exists(writePath)) {
    committedSize = Files.size(writePath);
    try (InputStream in = Files.newInputStream(writePath)) {
      SkipOutputStream skipStream =
          new SkipOutputStream(Files.newOutputStream(writePath, APPEND), committedSize);
      hashOut = digestUtil.newHashingOutputStream(skipStream);
      ByteStreams.copy(in, hashOut);
      checkState(skipStream.isSkipped());
    }
  } else {
    committedSize = 0;
    hashOut = digestUtil.newHashingOutputStream(Files.newOutputStream(writePath, CREATE));
  }

  return new CancellableOutputStream(hashOut) {
    long written = committedSize;
    final Digest expectedDigest = keyToDigest(key, blobSizeInBytes, digestUtil);

    @Override
    public long getWritten() {
      return written;
    }

    @Override
    public Path getPath() {
      return writePath;
    }

    @Override
    public void cancel() throws IOException {
      try {
        hashOut.close();
        Files.delete(writePath);
      } finally {
        dischargeAndNotify(blobSizeInBytes);
      }
    }

    @Override
    public void write(int b) throws IOException {
      if (written >= blobSizeInBytes) {
        throw new IOException(
            format("attempted overwrite at %d by 1 byte for %s", written, writeKey));
      }
      hashOut.write(b);
      written++;
    }

    @Override
    public void write(byte[] b) throws IOException {
      write(b, 0, b.length);
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
      if (written + len > blobSizeInBytes) {
        throw new IOException(
            format("attempted overwrite at %d by %d bytes for %s", written, len, writeKey));
      }
      hashOut.write(b, off, len);
      written += len;
    }

    @Override
    public void close() throws IOException {
      // has some trouble with multiple closes; fortunately we have something above to handle this
      long size = getWritten();
      // should probably discharge here as well
      hashOut.close();
      if (size > blobSizeInBytes) {
        String hash = hashOut.hash().toString();
        try {
          Files.delete(writePath);
        } finally {
          dischargeAndNotify(blobSizeInBytes);
        }
        Digest actual = Digest.newBuilder().setHash(hash).setSizeBytes(size).build();
        throw new DigestMismatchException(actual, expectedDigest);
      }
      if (size != blobSizeInBytes) {
        throw new IncompleteBlobException(writePath, key, size, blobSizeInBytes);
      }
      commit();
    }

    void commit() throws IOException {
      String hash = hashOut.hash().toString();
      String fileName = writePath.getFileName().toString();
      if (!fileName.startsWith(hash)) {
        dischargeAndNotify(blobSizeInBytes);
        Digest actual = Digest.newBuilder().setHash(hash).setSizeBytes(getWritten()).build();
        throw new DigestMismatchException(actual, expectedDigest);
      }
      try {
        setReadOnlyPerms(writePath, isExecutable, fileStore);
      } catch (IOException e) {
        dischargeAndNotify(blobSizeInBytes);
        throw e;
      }

      Entry entry = new Entry(key, blobSizeInBytes, Deadline.after(10, SECONDS));
      Entry existingEntry = null;
      boolean inserted = false;
      try {
        Files.createLink(CASFileCache.this.getPath(key), writePath);
        existingEntry = storage.putIfAbsent(key, entry);
        inserted = existingEntry == null;
      } catch (FileAlreadyExistsException e) {
        logger.log(Level.FINE, "file already exists for " + key + ", nonexistent entry will fail");
      } finally {
        Files.delete(writePath);
        if (!inserted) {
          dischargeAndNotify(blobSizeInBytes);
        }
      }

      int attempts = 10;
      if (!inserted) {
        while (existingEntry == null && attempts-- != 0) {
          existingEntry = storage.get(key);
          try {
            MILLISECONDS.sleep(10);
          } catch (InterruptedException intEx) {
            throw new IOException(intEx);
          }
        }
        if (existingEntry == null) {
          throw new IOException("existing entry did not appear for " + key);
        }
      }

      if (existingEntry != null) {
        logger.log(Level.FINE, "lost the race to insert " + key);
        if (!referenceIfExists(key)) {
          // we would lose our accountability and have a presumed reference if we returned
          throw new IllegalStateException("storage conflict with existing key for " + key);
        }
      } else if (writeWinner.get()) {
        logger.log(Level.FINE, "won the race to insert " + key);
        try {
          onInsert.run();
        } catch (RuntimeException e) {
          throw new IOException(e);
        }
      } else {
        logger.log(Level.FINE, "did not win the race to insert " + key);
      }
    }
  };
}
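The entry-limit guard at the top rejects an oversized blob before any bytes are written or charged against the cache. The two-argument constructor suggests the exception carries both sizes; a plausible shape for it, inferred from usage rather than copied from buildfarm (the real class may differ), is:

import java.io.IOException;

// Sketch only: an inferred shape for build.buildfarm.common.EntryLimitException.
// It must be throwable from methods declaring only IOException, so extending
// IOException is assumed here.
class EntryLimitException extends IOException {
  private final long size;
  private final long maxSize;

  EntryLimitException(long size, long maxSize) {
    super(String.format("entry size %d exceeds maximum of %d", size, maxSize));
    this.size = size;
    this.maxSize = maxSize;
  }

  long getSize() {
    return size;
  }

  long getMaxSize() {
    return maxSize;
  }
}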
Use of build.buildfarm.common.EntryLimitException in project bazel-buildfarm by bazelbuild.
From class ShardWorkerContext, method uploadOutputDirectory:
private void uploadOutputDirectory(
    ActionResult.Builder resultBuilder,
    String outputDir,
    Path actionRoot,
    PreconditionFailure.Builder preconditionFailure)
    throws IOException, InterruptedException {
  Path outputDirPath = actionRoot.resolve(outputDir);
  if (!Files.exists(outputDirPath)) {
    logger.log(Level.FINE, "ReportResultStage: " + outputDir + " does not exist...");
    return;
  }
  if (!Files.isDirectory(outputDirPath)) {
    logger.log(Level.FINE, "ReportResultStage: " + outputDir + " is not a directory...");
    preconditionFailure.addViolationsBuilder()
        .setType(VIOLATION_TYPE_INVALID)
        .setSubject(outputDir)
        .setDescription("An output directory was not a directory");
    return;
  }

  Tree.Builder treeBuilder = Tree.newBuilder();
  OutputDirectoryContext outputRoot = new OutputDirectoryContext();
  Files.walkFileTree(
      outputDirPath,
      new SimpleFileVisitor<Path>() {
        OutputDirectoryContext currentDirectory = null;
        final Stack<OutputDirectoryContext> path = new Stack<>();

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
          Digest digest;
          try {
            digest = getDigestUtil().compute(file);
          } catch (NoSuchFileException e) {
            logger.log(
                Level.SEVERE,
                format(
                    "error visiting file %s under output dir %s",
                    outputDirPath.relativize(file), outputDirPath.toAbsolutePath()),
                e);
            return FileVisitResult.CONTINUE;
          }
          // should we cast to PosixFilePermissions and do gymnastics there for executable?
          // TODO symlink per revision proposal
          currentDirectory.addFile(
              FileNode.newBuilder()
                  .setName(file.getFileName().toString())
                  .setDigest(digest)
                  .setIsExecutable(Files.isExecutable(file))
                  .build());
          try {
            insertFile(digest, file);
          } catch (InterruptedException e) {
            throw new IOException(e);
          } catch (EntryLimitException e) {
            preconditionFailure.addViolationsBuilder()
                .setType(VIOLATION_TYPE_MISSING)
                .setSubject("blobs/" + DigestUtil.toString(digest))
                .setDescription(
                    "An output could not be uploaded because it exceeded the maximum size of an entry");
          }
          return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
          path.push(currentDirectory);
          if (dir.equals(outputDirPath)) {
            currentDirectory = outputRoot;
          } else {
            currentDirectory = new OutputDirectoryContext();
          }
          return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
          OutputDirectoryContext parentDirectory = path.pop();
          Directory directory = currentDirectory.toDirectory();
          if (parentDirectory == null) {
            treeBuilder.setRoot(directory);
          } else {
            parentDirectory.addDirectory(
                DirectoryNode.newBuilder()
                    .setName(dir.getFileName().toString())
                    .setDigest(getDigestUtil().compute(directory))
                    .build());
            treeBuilder.addChildren(directory);
          }
          currentDirectory = parentDirectory;
          return FileVisitResult.CONTINUE;
        }
      });

  Tree tree = treeBuilder.build();
  ByteString treeBlob = tree.toByteString();
  Digest treeDigest = getDigestUtil().compute(treeBlob);
  insertBlob(treeDigest, treeBlob);
  resultBuilder.addOutputDirectoriesBuilder().setPath(outputDir).setTreeDigest(treeDigest);
}
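The visitor assembles the REAPI Tree bottom-up: postVisitDirectory folds each finished Directory into its parent as a DirectoryNode and appends it to the Tree's children, while the walk root becomes Tree.root. As a standalone sketch of that final shape (digests elided for brevity; the real code sets one on every node):

import build.bazel.remote.execution.v2.Directory;
import build.bazel.remote.execution.v2.DirectoryNode;
import build.bazel.remote.execution.v2.FileNode;
import build.bazel.remote.execution.v2.Tree;

class TreeShapeSketch {
  // Sketch: the Tree uploadOutputDirectory would produce for
  //   out/
  //     a.txt
  //     sub/b.txt
  public static void main(String[] args) {
    Directory sub =
        Directory.newBuilder()
            .addFiles(FileNode.newBuilder().setName("b.txt"))
            .build();
    Directory root =
        Directory.newBuilder()
            .addFiles(FileNode.newBuilder().setName("a.txt"))
            .addDirectories(DirectoryNode.newBuilder().setName("sub"))
            .build();
    // root describes the output directory itself; every other directory
    // reached by the walk lands in children.
    Tree tree = Tree.newBuilder().setRoot(root).addChildren(sub).build();
    System.out.println(tree);
  }
}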
Use of build.buildfarm.common.EntryLimitException in project bazel-buildfarm by bazelbuild.
From class WriteStreamObserver, method errorResponse:
private boolean errorResponse(Throwable t) {
  if (exception.compareAndSet(null, t)) {
    boolean isEntryLimitException = t instanceof EntryLimitException;
    if (isEntryLimitException) {
      t = Status.OUT_OF_RANGE.withDescription(t.getMessage()).asException();
    }
    responseObserver.onError(t);
    if (isEntryLimitException) {
      RequestMetadata requestMetadata = TracingMetadataUtils.fromCurrentContext();
      logger.log(
          Level.WARNING,
          format(
              "%s-%s: %s -> %s -> %s: exceeded entry limit for %s",
              requestMetadata.getToolDetails().getToolName(),
              requestMetadata.getToolDetails().getToolVersion(),
              requestMetadata.getCorrelatedInvocationsId(),
              requestMetadata.getToolInvocationId(),
              requestMetadata.getActionId(),
              name));
    }
    return true;
  }
  return false;
}
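Mapping EntryLimitException onto OUT_OF_RANGE follows the gRPC convention for a write that can never succeed at its requested size. On the client side it arrives as a StatusRuntimeException; a minimal handling sketch (the retry policy shown is an assumption for illustration, not buildfarm's client logic):

import io.grpc.Status;
import io.grpc.StatusRuntimeException;

class OutOfRangeHandlingSketch {
  // Sketch of the client-side counterpart: an upload that trips the entry
  // limit surfaces as OUT_OF_RANGE. Re-sending the same blob cannot succeed,
  // so a client should give up on that blob rather than retry.
  static boolean shouldRetry(StatusRuntimeException e) {
    return e.getStatus().getCode() != Status.Code.OUT_OF_RANGE;
  }
}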