Use of java.nio.ByteBuffer in the project bazel (bazelbuild): class ZipIn, method entryWith.
/**
 * Constructs a zip entry object for the location of the given header, with the corresponding
 * directory entry.
 *
 * @param header local file header for the entry.
 * @param dirEntry corresponding directory entry, or {@code null} if not available.
 * @return a zip entry for the given header and directory entry; the entry's status code records
 *     whether the entry data could be located and validated.
 * @throws IOException if reading the entry data from the underlying file fails.
 */
private ZipEntry entryWith(LocalFileHeader header, DirectoryEntry dirEntry) throws IOException {
ZipEntry zipEntry = new ZipEntry().withHeader(header).withEntry(dirEntry);
// Entry data begins immediately after the local file header.
// NOTE(review): the (int) cast truncates for offsets >= 2 GiB — confirm getData's offset
// parameter is intentionally int-sized before relying on this for large archives.
int offset = (int) (header.fileOffset() + header.getSize());
// !useDirectory || dirEntry != null || !ignoreDeleted
String entryName = header.getFilename();
if (dirEntry != null && !entryName.equals(dirEntry.getFilename())) {
// Header and directory disagree on the filename. The directory entry was already
// attached above, so only the status code needs to be set here.
return zipEntry.withCode(ZipEntry.Status.FILENAME_ERROR);
}
int sizeByHeader = header.dataSize();
int sizeByDir = dirEntry != null ? dirEntry.dataSize() : -1;
ByteBuffer content;
if (sizeByDir == sizeByHeader && sizeByDir >= 0) {
// Ideal case, header and directory in agreement
content = getData(offset, sizeByHeader);
if (content.limit() == sizeByHeader) {
return zipEntry.withContent(content).withCode(ZipEntry.Status.ENTRY_OK);
} else {
return zipEntry.withContent(content).withCode(ZipEntry.Status.NOT_ENOUGH_DATA);
}
}
if (sizeByDir >= 0) {
// If file is correct, we get here because of a 0x8 flag, and we expect
// data to be followed by a data descriptor.
content = getData(offset, sizeByDir);
DataDescriptor dataDesc = descriptorAt(offset + sizeByDir, dirEntry);
if (dataDesc != null) {
return zipEntry.withContent(content).withDescriptor(dataDesc).withCode(ZipEntry.Status.ENTRY_OK);
}
return zipEntry.withContent(content).withCode(ZipEntry.Status.NO_DATA_DESC);
}
if (!ignoreDeleted) {
if (sizeByHeader >= 0) {
// No directory entry; trust the size recorded in the local header.
content = getData(offset, sizeByHeader);
if (content.limit() == sizeByHeader) {
return zipEntry.withContent(content).withCode(ZipEntry.Status.ENTRY_OK);
}
return zipEntry.withContent(content).withCode(ZipEntry.Status.NOT_ENOUGH_DATA);
} else {
// Size unknown from both header and directory; try to locate a data descriptor.
DataDescriptor dataDesc = descriptorFrom(offset, dirEntry);
if (dataDesc == null) {
// Only way now would be to decompress
return zipEntry.withCode(ZipEntry.Status.UNKNOWN_SIZE);
}
int sizeByDesc = dataDesc.get(EXTSIZ);
if (sizeByDesc != dataDesc.fileOffset() - offset) {
// The descriptor's recorded size doesn't match the span of data preceding it,
// so it can't be the right descriptor for this entry.
return zipEntry.withDescriptor(dataDesc).withCode(ZipEntry.Status.UNKNOWN_SIZE);
}
content = getData(offset, sizeByDesc);
return zipEntry.withContent(content).withDescriptor(dataDesc).withCode(ZipEntry.Status.ENTRY_OK);
}
}
return zipEntry.withCode(ZipEntry.Status.UNKNOWN_SIZE);
}
Use of java.nio.ByteBuffer in the project bazel (bazelbuild): class ZipIn, method loadEndOfCentralDirectory.
/**
 * Locates the "end of central directory" record, expected located at the end of the file, and
 * reads it into a byte buffer. Called on the first invocation of
 * {@link #endOfCentralDirectory() }.
 *
 * @throws IOException if reading from the underlying file channel fails.
 */
protected void loadEndOfCentralDirectory() throws IOException {
cdir = null;
long size = fileChannel.size();
verbose("Loading ZipIn: " + filename);
verbose("-- size: " + size);
// The EOCD record lives at the end of the file; read at most MAX_EOCD_SIZE trailing bytes.
int cap = (int) Math.min(size, MAX_EOCD_SIZE);
ByteBuffer buffer = ByteBuffer.allocate(cap).order(ByteOrder.LITTLE_ENDIAN);
long offset = size - cap;
while (true) {
fileChannel.position(offset);
// Fill the buffer completely. Positional reads may be partial and do not advance the
// channel position, so track the file position explicitly; re-reading from the same
// offset would duplicate bytes. A -1 (end-of-stream) return would otherwise loop forever.
long readPos = offset;
while (buffer.hasRemaining()) {
int n = fileChannel.read(buffer, readPos);
if (n < 0) {
break;
}
readPos += n;
}
// scan to find it...
int endOfDirOffset = ScanUtil.scanBackwardsTo(EOCD_SIG, buffer);
if (endOfDirOffset < 0) {
if (offset == 0) {
if (useDirectory) {
throw new IllegalStateException("No end of central directory marker");
} else {
break;
}
}
// Not found in this window; back up and rescan from an earlier offset.
offset = Math.max(offset - 1000, 0);
buffer.clear();
continue;
}
long eocdFileOffset = offset + endOfDirOffset;
verbose("-- EOCD: " + eocdFileOffset + " size: " + (size - eocdFileOffset));
buffer.position(endOfDirOffset);
// Found the EOCD signature; materialize the record view at its absolute file offset.
eocd = EndOfCentralDirectory.viewOf(buffer).at(offset + endOfDirOffset);
break;
}
if (eocd != null) {
bufferedFile = new BufferedFile(fileChannel, 0, eocd.get(ENDOFF), READ_BLOCK_SIZE);
} else {
bufferedFile = new BufferedFile(fileChannel, READ_BLOCK_SIZE);
}
}
Use of java.nio.ByteBuffer in the project bazel (bazelbuild): class LocalFileHeader, method allocate.
/**
 * Creates a {@code LocalFileHeader} backed by a heap-allocated buffer. Apart from the signature
 * and extra data (if any), the returned object is uninitialized.
 *
 * @param name entry file name. Cannot be {@code null}.
 * @param extraData extra data, or {@code null}
 * @return a {@code LocalFileHeader} with a heap allocated buffer.
 */
public static LocalFileHeader allocate(String name, byte[] extraData) {
byte[] encodedName = name.getBytes(UTF_8);
// Treat a null extra-data argument as an empty field.
byte[] extra = (extraData != null) ? extraData : EMPTY;
int totalSize = SIZE + encodedName.length + extra.length;
ByteBuffer buf = ByteBuffer.allocate(totalSize).order(LITTLE_ENDIAN);
return new LocalFileHeader(buf).init(encodedName, extra, totalSize);
}
Use of java.nio.ByteBuffer in the project glide (bumptech): class ReEncodingGifResourceEncoderTest, method testSetsDataOnParserBeforeParsingHeader.
@Test
public void testSetsDataOnParserBeforeParsingHeader() {
// Arrange: the drawable supplies the GIF bytes, and the parser yields a stubbed header.
ByteBuffer gifBytes = ByteBuffer.allocate(1);
when(gifDrawable.getBuffer()).thenReturn(gifBytes);
GifHeader stubbedHeader = mock(GifHeader.class);
when(parser.parseHeader()).thenReturn(stubbedHeader);

// Act.
encoder.encode(resource, file, options);

// Assert: the parser receives the data strictly before parsing, and the decoder
// is configured with the parsed header plus the same data afterwards.
InOrder inOrder = inOrder(parser, decoder);
inOrder.verify(parser).setData(eq(gifBytes));
inOrder.verify(parser).parseHeader();
inOrder.verify(decoder).setData(stubbedHeader, gifBytes);
}
Use of java.nio.ByteBuffer in the project jvm-tools (aragozin): class ByteBufferPageManager, method refill.
/**
 * Allocates one more resource page (if the memory budget allows) and carves it into
 * fixed-size sub-pages, which are registered and added to the free list.
 */
private void refill() {
// Stop growing once the allocator has signaled its limit or the budget would be exceeded.
if (atLimit || memoryUsed + resourcePageSize > memoryLimit) {
return;
}
ByteBuffer page = allocatePage();
if (page == null) {
// Allocation failed; remember that so we don't keep retrying.
atLimit = true;
return;
}
memoryUsed += resourcePageSize;
resourcePages.add(page);
// Slice the large resource page into equal sub-pages by moving the limit/position window.
int subPageCount = resourcePageSize / pageSize;
for (int i = 0; i < subPageCount; i++) {
int start = i * pageSize;
page.limit(start + pageSize);
page.position(start);
ByteBuffer slice = page.slice();
BW wrapper = new BW(slice);
freePages.add(wrapper);
allPages.put(slice, wrapper);
}
}
Aggregations