Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class CodecPerformance, method getRoughSize.
static int getRoughSize(final Cell[] cells) {
  int size = 0;
  for (Cell c : cells) {
    size += c.getRowLength() + c.getFamilyLength() + c.getQualifierLength() + c.getValueLength();
    size += Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
  }
  return size;
}
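A minimal usage sketch: the rows, family, qualifier, and values below are made up for illustration; getRoughSize sums the key-component and value lengths plus a fixed per-cell overhead for the timestamp (long) and type (byte).
// Hypothetical sample cells for illustration only.
Cell[] cells = new Cell[] {
    new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value1")),
    new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value2"))
};
// Rough payload estimate: component lengths plus SIZEOF_LONG + SIZEOF_BYTE per cell.
int estimate = getRoughSize(cells);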
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class TestHFileEncryption, method testHFileEncryption.
@Test(timeout = 6000000)
public void testHFileEncryption() throws Exception {
  // Create 1000 random test KVs
  RedundantKVGenerator generator = new RedundantKVGenerator();
  List<KeyValue> testKvs = generator.generateTestKeyValues(1000);
  // Iterate through data block encoding and compression combinations
  Configuration conf = TEST_UTIL.getConfiguration();
  CacheConfig cacheConf = new CacheConfig(conf);
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    for (Compression.Algorithm compression : TestHFileBlock.COMPRESSION_ALGORITHMS) {
      HFileContext fileContext = new HFileContextBuilder()
          .withBlockSize(4096) // small blocks
          .withEncryptionContext(cryptoContext)
          .withCompression(compression)
          .withDataBlockEncoding(encoding)
          .build();
      // write a new test HFile
      LOG.info("Writing with " + fileContext);
      Path path = new Path(TEST_UTIL.getDataTestDir(), UUID.randomUUID().toString() + ".hfile");
      FSDataOutputStream out = fs.create(path);
      HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
          .withOutputStream(out)
          .withFileContext(fileContext)
          .create();
      try {
        for (KeyValue kv : testKvs) {
          writer.append(kv);
        }
      } finally {
        writer.close();
        out.close();
      }
      // read it back in
      LOG.info("Reading with " + fileContext);
      int i = 0;
      HFileScanner scanner = null;
      HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
      try {
        reader.loadFileInfo();
        FixedFileTrailer trailer = reader.getTrailer();
        assertNotNull(trailer.getEncryptionKey());
        scanner = reader.getScanner(false, false);
        assertTrue("Initial seekTo failed", scanner.seekTo());
        do {
          Cell kv = scanner.getCell();
          assertTrue("Read back an unexpected or invalid KV",
              testKvs.contains(KeyValueUtil.ensureKeyValue(kv)));
          i++;
        } while (scanner.next());
      } finally {
        reader.close();
        scanner.close();
      }
      assertEquals("Did not read back as many KVs as written", i, testKvs.size());
      // Test random seeks with pread
      LOG.info("Random seeking with " + fileContext);
      reader = HFile.createReader(fs, path, cacheConf, conf);
      try {
        scanner = reader.getScanner(false, true);
        assertTrue("Initial seekTo failed", scanner.seekTo());
        for (i = 0; i < 100; i++) {
          KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size()));
          assertEquals("Unable to find KV as expected: " + kv, scanner.seekTo(kv), 0);
        }
      } finally {
        scanner.close();
        reader.close();
      }
    }
  }
}
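The test references a cryptoContext field that is initialized outside this snippet. Below is a minimal sketch of how such an encryption context could be built, assuming the AES cipher and the hbase-common test key provider are available; the exact setup used by the test class is not shown here.
// Sketch only: build an Encryption.Context comparable to the cryptoContext used above.
Configuration conf = TEST_UTIL.getConfiguration();
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
Encryption.Context cryptoContext = Encryption.newContext(conf);
Cipher aes = Encryption.getCipher(conf, "AES");
cryptoContext.setCipher(aes);
byte[] key = new byte[aes.getKeyLength()];
new SecureRandom().nextBytes(key); // random data key for the test
cryptoContext.setKey(key);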
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class TestHFileWriterV3, method writeDataAndReadFromHFile.
private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount,
    boolean findMidKey, boolean useTags) throws IOException {
  HFileContext context = new HFileContextBuilder()
      .withBlockSize(4096)
      .withIncludesTags(useTags)
      .withCompression(compressAlgo)
      .build();
  HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, hfilePath)
      .withFileContext(context)
      .withComparator(CellComparator.COMPARATOR)
      .create();
  // Just a fixed seed.
  Random rand = new Random(9713312);
  List<KeyValue> keyValues = new ArrayList<>(entryCount);
  for (int i = 0; i < entryCount; ++i) {
    byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(rand, i);
    // A random-length random value.
    byte[] valueBytes = RandomKeyValueUtil.randomValue(rand);
    KeyValue keyValue = null;
    if (useTags) {
      ArrayList<Tag> tags = new ArrayList<>();
      for (int j = 0; j < 1 + rand.nextInt(4); j++) {
        byte[] tagBytes = new byte[16];
        rand.nextBytes(tagBytes);
        tags.add(new ArrayBackedTag((byte) 1, tagBytes));
      }
      keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes, tags);
    } else {
      keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes);
    }
    writer.append(keyValue);
    keyValues.add(keyValue);
  }
  // Add in an arbitrary order. They will be sorted lexicographically by
  // the key.
  writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
  writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
  writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));
  writer.close();
  FSDataInputStream fsdis = fs.open(hfilePath);
  long fileSize = fs.getFileStatus(hfilePath).getLen();
  FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);
  assertEquals(3, trailer.getMajorVersion());
  assertEquals(entryCount, trailer.getEntryCount());
  HFileContext meta = new HFileContextBuilder()
      .withCompression(compressAlgo)
      .withIncludesMvcc(false)
      .withIncludesTags(useTags)
      .withHBaseCheckSum(true)
      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, meta);
  // Comparator class name is stored in the trailer in version 3.
  CellComparator comparator = trailer.createComparator();
  HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
      new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator, trailer.getNumDataIndexLevels());
  HFileBlockIndex.BlockIndexReader metaBlockIndexReader =
      new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
  HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
      fileSize - trailer.getTrailerSize());
  // Data index. We also read statistics about the block index written after
  // the root level.
  dataBlockIndexReader.readMultiLevelIndexRoot(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount());
  if (findMidKey) {
    Cell midkey = dataBlockIndexReader.midkey();
    assertNotNull("Midkey should not be null", midkey);
  }
  // Meta index.
  metaBlockIndexReader.readRootIndex(
      blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
      trailer.getMetaIndexCount());
  // File info
  FileInfo fileInfo = new FileInfo();
  fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
  byte[] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION);
  boolean includeMemstoreTS =
      keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0;
  // Counters for the number of key/value pairs and the number of blocks
  int entriesRead = 0;
  int blocksRead = 0;
  long memstoreTS = 0;
  // Scan blocks the way the reader would scan them
  fsdis.seek(0);
  long curBlockPos = 0;
  while (curBlockPos <= trailer.getLastDataBlockOffset()) {
    HFileBlock block = blockReader.readBlockData(curBlockPos, -1, false).unpack(context, blockReader);
    assertEquals(BlockType.DATA, block.getBlockType());
    ByteBuff buf = block.getBufferWithoutHeader();
    int keyLen = -1;
    while (buf.hasRemaining()) {
      keyLen = buf.getInt();
      int valueLen = buf.getInt();
      byte[] key = new byte[keyLen];
      buf.get(key);
      byte[] value = new byte[valueLen];
      buf.get(value);
      byte[] tagValue = null;
      if (useTags) {
        int tagLen = ((buf.get() & 0xff) << 8) ^ (buf.get() & 0xff);
        tagValue = new byte[tagLen];
        buf.get(tagValue);
      }
      if (includeMemstoreTS) {
        ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
            buf.arrayOffset() + buf.position(), buf.remaining());
        DataInputStream data_input = new DataInputStream(byte_input);
        memstoreTS = WritableUtils.readVLong(data_input);
        buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
      }
      // A brute-force check to see that all keys and values are correct.
      KeyValue kv = keyValues.get(entriesRead);
      assertTrue(Bytes.compareTo(key, kv.getKey()) == 0);
      assertTrue(Bytes.compareTo(value, 0, value.length, kv.getValueArray(), kv.getValueOffset(),
          kv.getValueLength()) == 0);
      if (useTags) {
        assertNotNull(tagValue);
        KeyValue tkv = kv;
        assertEquals(tagValue.length, tkv.getTagsLength());
        assertTrue(Bytes.compareTo(tagValue, 0, tagValue.length, tkv.getTagsArray(),
            tkv.getTagsOffset(), tkv.getTagsLength()) == 0);
      }
      ++entriesRead;
    }
    ++blocksRead;
    curBlockPos += block.getOnDiskSizeWithHeader();
  }
  LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead);
  assertEquals(entryCount, entriesRead);
  // Meta blocks. We can scan until the load-on-open data offset (which is
  // the root block index offset in version 2) because we are not testing
  // intermediate-level index blocks here.
  int metaCounter = 0;
  while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
    LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " +
        trailer.getLoadOnOpenDataOffset());
    HFileBlock block = blockReader.readBlockData(curBlockPos, -1, false).unpack(context, blockReader);
    assertEquals(BlockType.META, block.getBlockType());
    Text t = new Text();
    ByteBuff buf = block.getBufferWithoutHeader();
    if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
      throw new IOException("Failed to deserialize block " + this + " into a " +
          t.getClass().getSimpleName());
    }
    Text expectedText = (metaCounter == 0 ? new Text("Paris")
        : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C."));
    assertEquals(expectedText, t);
    LOG.info("Read meta block data: " + t);
    ++metaCounter;
    curBlockPos += block.getOnDiskSizeWithHeader();
  }
  fsdis.close();
}
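A hedged sketch of how this helper might be invoked from a test method; the file name and the parameter values below are assumptions chosen for illustration, not taken from the actual test class.
// Hypothetical caller: write 10000 entries with GZ compression, check the midkey, no tags.
Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testHFileFormatV3.hfile");
writeDataAndReadFromHFile(hfilePath, Compression.Algorithm.GZ, 10000, true, false);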
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class TestScannerFromBucketCache, method testBasicScanWithOffheapBucketCacheWithMBB.
@Test
public void testBasicScanWithOffheapBucketCacheWithMBB() throws IOException {
  setUp(true, true);
  byte[] row1 = Bytes.toBytes("row1offheap");
  byte[] qf1 = Bytes.toBytes("qualifier1");
  byte[] qf2 = Bytes.toBytes("qualifier2");
  byte[] fam1 = Bytes.toBytes("famoffheap");
  // System.currentTimeMillis();
  long ts1 = 1;
  long ts2 = ts1 + 1;
  long ts3 = ts1 + 2;
  // Setting up region
  String method = this.getName();
  this.region = initHRegion(tableName, method, conf, test_util, fam1);
  try {
    List<Cell> expected = insertData(row1, qf1, qf2, fam1, ts1, ts2, ts3, true);
    List<Cell> actual = performScan(row1, fam1);
    // Verify result
    for (int i = 0; i < expected.size(); i++) {
      assertFalse(actual.get(i) instanceof ByteBufferKeyValue);
      assertTrue(CellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }
    // Wait for the bucket cache threads to move the data to offheap
    Thread.sleep(500);
    // Do the scan again and verify. This time it should come from the bucket cache in offheap
    // mode, but one of the cells will be copied due to the asSubByteBuff call.
    Scan scan = new Scan(row1);
    scan.addFamily(fam1);
    scan.setMaxVersions(10);
    actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);
    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);
    // Verify result
    for (int i = 0; i < expected.size(); i++) {
      if (i != 5) {
        // The last cell fetched will be of a shareable type but not offheap because
        // the MBB is copied to form a single cell.
        assertTrue(actual.get(i) instanceof ByteBufferKeyValue);
      }
    }
  } catch (InterruptedException e) {
    // Ignore an interrupt from the Thread.sleep() above.
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
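The setUp(true, true) call configures an offheap bucket cache outside this snippet. Below is a rough sketch of the kind of configuration involved, using the standard bucket cache keys; the exact sizes and flags the test applies are not shown here and the values are illustrative.
// Sketch only: enable an offheap bucket cache before the region is created (values are assumptions).
Configuration conf = test_util.getConfiguration();
conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 64); // MB reserved for the offheap bucket cache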
Use of org.apache.hadoop.hbase.Cell in project hbase by apache.
The class TestGroupingTableMap, method shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes.
@Test
@SuppressWarnings({ "deprecation", "unchecked" })
public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() throws Exception {
  GroupingTableMap gTableMap = null;
  try {
    Result result = mock(Result.class);
    Reporter reporter = mock(Reporter.class);
    gTableMap = new GroupingTableMap();
    Configuration cfg = new Configuration();
    cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
    JobConf jobConf = new JobConf(cfg);
    gTableMap.configure(jobConf);
    byte[] row = {};
    List<Cell> keyValues = ImmutableList.<Cell>of(
        new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")),
        new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("2222")),
        new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("3333")));
    when(result.listCells()).thenReturn(keyValues);
    OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock = mock(OutputCollector.class);
    gTableMap.map(null, result, outputCollectorMock, reporter);
    verify(result).listCells();
    verifyZeroInteractions(outputCollectorMock);
  } finally {
    if (gTableMap != null)
      gTableMap.close();
  }
}
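For contrast, here is a minimal sketch of the complementary case, in which each grouped column appears exactly once so the collector should be invoked; this is an illustrative sketch built on the same mocks, not another test from the class.
// Sketch only: one value per grouped column, so map() is expected to emit a record.
List<Cell> uniqueCells = ImmutableList.<Cell>of(
    new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"), Bytes.toBytes("1111")),
    new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"), Bytes.toBytes("2222")));
when(result.listCells()).thenReturn(uniqueCells);
gTableMap.map(null, result, outputCollectorMock, reporter);
verify(outputCollectorMock).collect(any(ImmutableBytesWritable.class), any(Result.class));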