use of java.io.DataOutputStream in project kafka by apache.
the class SimpleRecordTest method testCompressedIterationWithNullValue.
@Test(expected = InvalidRecordException.class)
public void testCompressedIterationWithNullValue() throws Exception {
    ByteBuffer buffer = ByteBuffer.allocate(128);
    DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
    // Write an entry header followed by a compressed record whose key and value are both null.
    LogEntry.writeHeader(out, 0L, Record.RECORD_OVERHEAD_V1);
    Record.write(out, Record.CURRENT_MAGIC_VALUE, 1L, null, null, CompressionType.GZIP, TimestampType.CREATE_TIME);
    buffer.flip();
    // A compressed entry with a null value is invalid, so iteration must throw InvalidRecordException.
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    for (Record record : records.records())
        fail("Iteration should have caused invalid record error");
}
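The test relies on a common pattern: layer a DataOutputStream over a buffer-backed stream, write primitives, then flip the buffer for reading. Below is a minimal JDK-only sketch of that write-then-read round trip; it substitutes a plain ByteArrayOutputStream for Kafka's ByteBufferOutputStream, so it illustrates the pattern rather than the Kafka API itself.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class DataOutputRoundTrip {
    public static void main(String[] args) throws IOException {
        // Write primitives through a DataOutputStream, as the test above does.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        out.writeLong(0L); // e.g. an entry offset
        out.writeInt(42);  // e.g. a record size
        out.flush();

        // Wrap the written bytes in a ByteBuffer for reading, mirroring buffer.flip().
        ByteBuffer buffer = ByteBuffer.wrap(baos.toByteArray());
        System.out.println(buffer.getLong()); // prints 0
        System.out.println(buffer.getInt());  // prints 42
    }
}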
use of java.io.DataOutputStream in project hbase by apache.
the class TestChecksum method testAllChecksumTypes.
/**
 * Test all checksum types by writing and reading back blocks.
 */
@Test
public void testAllChecksumTypes() throws IOException {
    List<ChecksumType> cktypes = new ArrayList<>(Arrays.asList(ChecksumType.values()));
    for (Iterator<ChecksumType> itr = cktypes.iterator(); itr.hasNext(); ) {
        ChecksumType cktype = itr.next();
        Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + cktype.getName());
        FSDataOutputStream os = fs.create(path);
        HFileContext meta = new HFileContextBuilder().withChecksumType(cktype).build();
        HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
        DataOutputStream dos = hbw.startWriting(BlockType.DATA);
        // Write a known payload so the read-back below can be verified value by value.
        for (int i = 0; i < 1000; ++i) {
            dos.writeInt(i);
        }
        hbw.writeHeaderAndData(os);
        int totalSize = hbw.getOnDiskSizeWithHeader();
        os.close();
        // Use hbase checksums.
        assertTrue(hfs.useHBaseChecksum());
        FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
        meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, (HFileSystem) fs, path, meta);
        HFileBlock b = hbr.readBlockData(0, -1, false);
        ByteBuff data = b.getBufferWithoutHeader();
        for (int i = 0; i < 1000; i++) {
            assertEquals(i, data.getInt());
        }
        // Reading past the payload must underflow rather than expose checksum bytes.
        boolean exceptionThrown = false;
        try {
            data.getInt();
        } catch (BufferUnderflowException e) {
            exceptionThrown = true;
        }
        assertTrue(exceptionThrown);
        // No checksum failures should have been recorded during the read.
        assertEquals(0, HFile.getChecksumFailuresCount());
    }
}
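For intuition about what a ChecksumType provides, here is a minimal sketch using the JDK's java.util.zip.CRC32. This illustrates checksum verification in general, not HBase's actual checksum path: the writer stores a checksum over the block's bytes, and the reader recomputes and compares it to detect corruption.

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class ChecksumSketch {
    public static void main(String[] args) {
        byte[] payload = "block payload".getBytes(StandardCharsets.UTF_8);

        // Writer side: compute a checksum over the payload and store it with the block.
        CRC32 writerCrc = new CRC32();
        writerCrc.update(payload);
        long stored = writerCrc.getValue();

        // Reader side: recompute over the bytes read back; a mismatch signals corruption.
        CRC32 readerCrc = new CRC32();
        readerCrc.update(payload);
        System.out.println(stored == readerCrc.getValue()); // prints true

        // Corrupt one byte and the recomputed checksum no longer matches.
        payload[0] ^= 0x01;
        CRC32 corruptCrc = new CRC32();
        corruptCrc.update(payload);
        System.out.println(stored == corruptCrc.getValue()); // prints false
    }
}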
use of java.io.DataOutputStream in project hbase by apache.
the class TestPrefixTreeEncoding method testSeekBeforeWithFixedData.
@Test
public void testSeekBeforeWithFixedData() throws Exception {
    formatRowNum = true;
    PrefixTreeCodec encoder = new PrefixTreeCodec();
    int batchId = numBatchesWritten++;
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withIncludesMvcc(false)
        .withIncludesTags(includesTag)
        .withCompression(Algorithm.NONE)
        .build();
    HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
    ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
    DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
    generateFixedTestData(kvset, batchId, false, includesTag, encoder, blkEncodingCtx, userDataStream);
    EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta));
    byte[] onDiskBytes = baosInMemory.toByteArray();
    // Skip the data-block-encoding ID that prefixes the encoded bytes.
    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, onDiskBytes.length - DataBlockEncoding.ID_SIZE);
    seeker.setCurrentBuffer(new SingleByteBuff(readBuffer));
    // Seek before the first keyvalue: there is no predecessor, so no cell is found.
    Cell seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId, 0), CF_BYTES);
    seeker.seekToKeyInBlock(seekKey, true);
    assertNull(seeker.getCell());
    // Seek before a middle keyvalue: the seeker lands on the preceding row.
    seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3), CF_BYTES);
    seeker.seekToKeyInBlock(seekKey, true);
    assertNotNull(seeker.getCell());
    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3 - 1), CellUtil.cloneRow(seeker.getCell()));
    // Seek before a key sorting after the last keyvalue: the seeker lands on the last row.
    seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(Bytes.toBytes("zzzz"), CF_BYTES);
    seeker.seekToKeyInBlock(seekKey, true);
    assertNotNull(seeker.getCell());
    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH - 1), CellUtil.cloneRow(seeker.getCell()));
}
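The seek-before semantics the three assertions exercise can be summarized without the PrefixTree machinery: position on the greatest key strictly less than the seek key, or on nothing if no such key exists. A rough analogy using the JDK's TreeMap.lowerKey (an illustration only; the real seeker walks the encoded trie):

import java.util.TreeMap;

public class SeekBeforeSketch {
    public static void main(String[] args) {
        // A sorted "block" of five row keys, standing in for the encoded cells.
        TreeMap<String, String> block = new TreeMap<>();
        for (int i = 0; i < 5; i++) {
            block.put(String.format("row%04d", i), "value" + i);
        }

        // Before the first key there is no predecessor, like the assertNull above.
        System.out.println(block.lowerKey("row0000")); // prints null

        // Before a middle key, the seek lands on the preceding row.
        System.out.println(block.lowerKey("row0003")); // prints row0002

        // Before a key past the end, the seek lands on the last row.
        System.out.println(block.lowerKey("zzzz"));    // prints row0004
    }
}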
use of java.io.DataOutputStream in project hbase by apache.
the class TestHFileDataBlockEncoder method writeBlock.
private void writeBlock(List<Cell> kvs, HFileContext fileContext, boolean useTags) throws IOException {
    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
    DataOutputStream dos = new DataOutputStream(baos);
    blockEncoder.startBlockEncoding(context, dos);
    for (Cell kv : kvs) {
        blockEncoder.encode(kv, context, dos);
    }
}
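Note that this helper only starts the encoding and feeds it cells; the block is finished with endBlockEncoding in createBlockOnDisk below. One reason DataOutputStream suits this role is its built-in byte counter, which callers can consult to see how much has been written. A minimal sketch of that counter:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WrittenBytesSketch {
    public static void main(String[] args) throws IOException {
        DataOutputStream dos = new DataOutputStream(new ByteArrayOutputStream());
        dos.writeInt(7);   // 4 bytes
        dos.writeLong(9L); // 8 bytes
        // size() returns the number of bytes written through this stream so far.
        System.out.println(dos.size()); // prints 12
    }
}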
use of java.io.DataOutputStream in project hbase by apache.
the class TestHFileDataBlockEncoder method createBlockOnDisk.
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags) throws IOException {
    int size;
    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Reserve space for the block header before streaming the encoded cells.
    baos.write(block.getDummyHeaderForVersion());
    DataOutputStream dos = new DataOutputStream(baos);
    blockEncoder.startBlockEncoding(context, dos);
    for (KeyValue kv : kvs) {
        blockEncoder.encode(kv, context, dos);
    }
    blockEncoder.endBlockEncoding(context, dos, baos.getBuffer(), BlockType.DATA);
    byte[] encodedBytes = baos.toByteArray();
    // The payload size excludes the dummy header written above.
    size = encodedBytes.length - block.getDummyHeaderForVersion().length;
    return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), -1, block.getHFileContext());
}
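The header-then-payload layout that both helpers rely on is easy to demonstrate with the JDK alone. In the sketch below, the 33-byte header width is a hypothetical stand-in for the real HFile block header size; the point is the size arithmetic, which mirrors the size computation above.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class HeaderPayloadSketch {
    // Hypothetical header width; HBase uses its own block header constant.
    static final byte[] DUMMY_HEADER = new byte[33];

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        baos.write(DUMMY_HEADER); // reserve space for the header up front

        DataOutputStream dos = new DataOutputStream(baos);
        for (int i = 0; i < 10; i++) {
            dos.writeInt(i); // the encoded payload
        }

        byte[] encoded = baos.toByteArray();
        // Payload size excludes the header, as in the test helper's size computation.
        System.out.println(encoded.length - DUMMY_HEADER.length); // prints 40
    }
}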