Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From class TestChecksum, method testChecksumInternals.
protected void testChecksumInternals(boolean useTags) throws IOException {
  Compression.Algorithm algo = NONE;
  for (boolean pread : new boolean[] { false, true }) {
    for (int bytesPerChecksum : BYTES_PER_CHECKSUM) {
      Path path =
        new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" + algo + bytesPerChecksum);
      FSDataOutputStream os = fs.create(path);
      HFileContext meta = new HFileContextBuilder()
        .withCompression(algo)
        .withIncludesMvcc(true)
        .withIncludesTags(useTags)
        .withHBaseCheckSum(true)
        .withBytesPerCheckSum(bytesPerChecksum)
        .build();
      HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);

      // write one block. The block has data
      // that is at least 6 times more than the checksum chunk size
      long dataSize = 0;
      DataOutputStream dos = hbw.startWriting(BlockType.DATA);
      for (; dataSize < 6 * bytesPerChecksum;) {
        for (int i = 0; i < 1234; ++i) {
          dos.writeInt(i);
          dataSize += 4;
        }
      }
      hbw.writeHeaderAndData(os);
      long totalSize = hbw.getOnDiskSizeWithHeader();
      os.close();

      long expectedChunks =
        ChecksumUtil.numChunks(dataSize + HConstants.HFILEBLOCK_HEADER_SIZE, bytesPerChecksum);
      LOG.info(
        "testChecksumChunks: pread={}, bytesPerChecksum={}, fileSize={}, "
          + "dataSize={}, expectedChunks={}, compression={}",
        pread, bytesPerChecksum, totalSize, dataSize, expectedChunks, algo.toString());

      // Verify hbase checksums.
      assertEquals(true, hfs.useHBaseChecksum());

      // Read data back from file.
      FSDataInputStream is = fs.open(path);
      FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path);
      meta = new HFileContextBuilder()
        .withCompression(algo)
        .withIncludesMvcc(true)
        .withIncludesTags(useTags)
        .withHBaseCheckSum(true)
        .withBytesPerCheckSum(bytesPerChecksum)
        .build();
      ReaderContext context = new ReaderContextBuilder()
        .withInputStreamWrapper(new FSDataInputStreamWrapper(is, nochecksum))
        .withFileSize(totalSize)
        .withFileSystem(hfs)
        .withFilePath(path)
        .build();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta,
        ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration());
      HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
      assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
      is.close();
      b.sanityCheck();
      assertEquals(dataSize, b.getUncompressedSizeWithoutHeader());

      // verify that we have the expected number of checksum chunks
      assertEquals(totalSize,
        HConstants.HFILEBLOCK_HEADER_SIZE + dataSize + expectedChunks * HFileBlock.CHECKSUM_SIZE);

      // assert that we did not encounter hbase checksum verification failures
      assertEquals(0, HFile.getAndResetChecksumFailuresCount());
    }
  }
}
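The final size assertion follows from per-chunk checksum arithmetic. A minimal standalone sketch of that arithmetic (illustrative only; the ceiling division is an assumption about what ChecksumUtil.numChunks computes, and the headerSize and checksumSize parameters stand in for HConstants.HFILEBLOCK_HEADER_SIZE and HFileBlock.CHECKSUM_SIZE):

// Sketch, not HBase code: one checksum chunk per bytesPerChecksum bytes of
// header+data, each chunk contributing checksumSize bytes of CRC on disk.
static long illustrateOnDiskSize(long dataSize, int headerSize, int bytesPerChecksum,
    int checksumSize) {
  // assumed ceiling division, mirroring how the test sums things up
  long expectedChunks = (dataSize + headerSize + bytesPerChecksum - 1) / bytesPerChecksum;
  return headerSize + dataSize + expectedChunks * checksumSize;
}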
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From class TestChecksum, method testVerifyCheckSum.
@Test
public void testVerifyCheckSum() throws IOException {
  int intCount = 10000;
  for (ChecksumType ckt : ChecksumType.values()) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + ckt.getName());
    FSDataOutputStream os = fs.create(path);
    HFileContext meta = new HFileContextBuilder().withChecksumType(ckt).build();
    HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
    DataOutputStream dos = hbw.startWriting(BlockType.DATA);
    for (int i = 0; i < intCount; ++i) {
      dos.writeInt(i);
    }
    hbw.writeHeaderAndData(os);
    int totalSize = hbw.getOnDiskSizeWithHeader();
    os.close();

    // Use hbase checksums.
    assertEquals(true, hfs.useHBaseChecksum());

    FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
    meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
    ReaderContext context = new ReaderContextBuilder()
      .withInputStreamWrapper(is)
      .withFileSize(totalSize)
      .withFileSystem((HFileSystem) fs)
      .withFilePath(path)
      .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta,
      ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration());
    HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
    assertTrue(!b.isSharedMem());

    // verify SingleByteBuff checksum.
    verifySBBCheckSum(b.getBufferReadOnly());
    // verify MultiByteBuff checksum.
    verifyMBBCheckSum(b.getBufferReadOnly());

    ByteBuff data = b.getBufferWithoutHeader();
    for (int i = 0; i < intCount; i++) {
      assertEquals(i, data.getInt());
    }
    try {
      data.getInt();
      fail();
    } catch (BufferUnderflowException e) {
      // expected failure
    }
    assertEquals(0, HFile.getAndResetChecksumFailuresCount());
  }
}
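The trailing try/catch expects the block's data buffer to be exactly drained after intCount ints. The same underflow behavior can be reproduced with a plain java.nio.ByteBuffer, which is what a SingleByteBuff wraps (standalone sketch, not part of the test):

// Sketch: reading past the last complete int throws java.nio.BufferUnderflowException,
// which is what the test treats as the expected failure.
ByteBuffer small = ByteBuffer.allocate(8);
small.putInt(1);
small.putInt(2);
small.flip();
small.getInt();
small.getInt();
try {
  small.getInt();                       // fewer than 4 bytes remain
} catch (BufferUnderflowException expected) {
  // buffer was exactly drained, as the test asserts
}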
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From class TestBlockIOUtils, method testByteBufferPositionedReadable.
@Test
public void testByteBufferPositionedReadable() throws IOException {
  assumeTrue("Skip the test because ByteBufferPositionedReadable is not available",
    isByteBufferPositionedReadable());
  long position = 0;
  int necessaryLen = 10;
  int extraLen = 1;
  int totalLen = necessaryLen + extraLen;
  int firstReadLen = 6;
  int secondReadLen = totalLen - firstReadLen;
  ByteBuffer buf = ByteBuffer.allocate(totalLen);
  ByteBuff bb = new SingleByteBuff(buf);
  // Stub a positioned, ByteBuffer-based stream that satisfies the request in two reads.
  MyFSDataInputStream in = mock(MyFSDataInputStream.class);
  when(in.read(position, buf)).thenReturn(firstReadLen);
  when(in.read(firstReadLen, buf)).thenReturn(secondReadLen);
  when(in.hasCapability(anyString())).thenReturn(true);
  boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
  assertTrue("Expect true return when reading extra bytes succeeds", ret);
  verify(in).read(position, buf);
  verify(in).read(firstReadLen, buf);
  verify(in).hasCapability(anyString());
  verifyNoMoreInteractions(in);
}
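The two stubbed reads imply a read-until-filled loop inside BlockIOUtils.preadWithExtra. A conceptual sketch of such a loop, using a hypothetical PositionedBufferRead interface rather than HBase's actual implementation:

// Conceptual sketch only: keep issuing positioned reads, advancing the file position
// by however many bytes each call returned, until the necessary bytes have arrived.
interface PositionedBufferRead {
  int read(long position, ByteBuffer buf) throws IOException;
}

static boolean readFully(PositionedBufferRead in, ByteBuffer buf, long position,
    int necessaryLen) throws IOException {
  int totalRead = 0;
  while (totalRead < necessaryLen) {
    int n = in.read(position + totalRead, buf);   // callee fills buf at its current position
    if (n < 0) {
      return false;                               // EOF before the necessary bytes arrived
    }
    totalRead += n;
  }
  return true;
}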
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From class TestBlockIOUtils, method testPositionalReadExtraSucceeded.
@Test
public void testPositionalReadExtraSucceeded() throws IOException {
  long position = 0;
  int bufOffset = 0;
  int necessaryLen = 10;
  int extraLen = 5;
  int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
  // Stub a stream whose single positional read returns everything that was asked for.
  FSDataInputStream in = mock(FSDataInputStream.class);
  when(in.read(position, buf, bufOffset, totalLen)).thenReturn(totalLen);
  when(in.hasCapability(anyString())).thenReturn(false);
  boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
  assertTrue("Expect true return when reading extra bytes succeeds", ret);
  verify(in).read(position, buf, bufOffset, totalLen);
  verify(in).hasCapability(anyString());
  verifyNoMoreInteractions(in);
}
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From class TestBlockIOUtils, method testPositionalReadShortReadCompletesNecessaryAndExtraBytes.
@Test
public void testPositionalReadShortReadCompletesNecessaryAndExtraBytes() throws IOException {
  long position = 0;
  int bufOffset = 0;
  int necessaryLen = 10;
  int extraLen = 5;
  int totalLen = necessaryLen + extraLen;
  byte[] buf = new byte[totalLen];
  ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
  // Stub a stream whose first positional read comes up short (5 of 15 bytes),
  // forcing a second read for the remaining 10 bytes at offset 5.
  FSDataInputStream in = mock(FSDataInputStream.class);
  when(in.read(position, buf, bufOffset, totalLen)).thenReturn(5);
  when(in.read(5, buf, 5, 10)).thenReturn(10);
  when(in.hasCapability(anyString())).thenReturn(false);
  boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
  assertTrue("Expect true return when reading extra bytes succeeds", ret);
  verify(in).read(position, buf, bufOffset, totalLen);
  verify(in).read(5, buf, 5, 10);
  verify(in).hasCapability(anyString());
  verifyNoMoreInteractions(in);
}
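The second stubbed read follows directly from the short first read. Spelling out the arithmetic with the same values as the mock (illustrative only):

// The first pread asks for all 15 bytes at position 0 but returns only 5, so the retry
// reads from file position 5, writes into buf at offset 5, and asks for the remaining
// 10 bytes, matching the verify(in).read(5, buf, 5, 10) expectation.
long position = 0;
int bufOffset = 0;
int totalLen = 15;
int firstReturned = 5;                            // the short read
long nextPosition = position + firstReturned;     // 5: where the retry reads from
int nextBufOffset = bufOffset + firstReturned;    // 5: where the retry writes into buf
int remainingLen = totalLen - firstReturned;      // 10: how much the retry asks for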