Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.
The class TestCacheOnWrite, method writeStoreFile.
private void writeStoreFile(boolean useTags) throws IOException {
  Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "test_cache_on_write");
  HFileContext meta = new HFileContextBuilder()
      .withCompression(compress)
      .withBytesPerCheckSum(CKBYTES)
      .withChecksumType(ChecksumType.NULL)
      .withBlockSize(DATA_BLOCK_SIZE)
      .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
      .withIncludesTags(useTags)
      .build();
  StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(storeFileParentDir)
      .withComparator(CellComparator.COMPARATOR)
      .withFileContext(meta)
      .withBloomType(BLOOM_TYPE)
      .withMaxKeyCount(NUM_KV)
      .build();
  byte[] cf = Bytes.toBytes("fam");
  for (int i = 0; i < NUM_KV; ++i) {
    byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i);
    byte[] qualifier = RandomKeyValueUtil.randomRowOrQualifier(rand);
    byte[] value = RandomKeyValueUtil.randomValue(rand);
    KeyValue kv;
    if (useTags) {
      // Attach a single visibility tag to every cell when tags are requested
      Tag t = new ArrayBackedTag((byte) 1, "visibility");
      List<Tag> tagList = new ArrayList<>();
      tagList.add(t);
      kv = new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length,
          rand.nextLong(), generateKeyType(rand), value, 0, value.length, tagList);
    } else {
      kv = new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length,
          rand.nextLong(), generateKeyType(rand), value, 0, value.length);
    }
    sfw.append(kv);
  }
  sfw.close();
  storeFilePath = sfw.getPath();
}
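The generateKeyType(rand) helper used above is a private member of the test class and is not part of this excerpt. A minimal sketch of what such a helper might look like, assuming it simply mixes Puts with random delete types (the exact body is an assumption):

// Hypothetical sketch of generateKeyType(Random); the real helper may differ.
private static KeyValue.Type generateKeyType(Random rand) {
  if (rand.nextBoolean()) {
    // Make roughly half of the cells Puts
    return KeyValue.Type.Put;
  }
  // Otherwise pick one of the delete key types at random
  KeyValue.Type[] deletes = {
      KeyValue.Type.Delete, KeyValue.Type.DeleteColumn,
      KeyValue.Type.DeleteFamily, KeyValue.Type.DeleteFamilyVersion };
  return deletes[rand.nextInt(deletes.length)];
}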
Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.
The class TestHFile, method writeStoreFile.
private Path writeStoreFile() throws IOException {
  Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "TestHFile");
  HFileContext meta = new HFileContextBuilder().withBlockSize(64 * 1024).build();
  StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(storeFileParentDir)
      .withComparator(CellComparator.COMPARATOR)
      .withFileContext(meta)
      .build();
  final int rowLen = 32;
  Random RNG = new Random();
  for (int i = 0; i < 1000; ++i) {
    byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i);
    byte[] v = RandomKeyValueUtil.randomValue(RNG);
    int cfLen = RNG.nextInt(k.length - rowLen + 1);
    KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, RNG.nextLong(), generateKeyType(RNG), v, 0, v.length);
    sfw.append(kv);
  }
  sfw.close();
  return sfw.getPath();
}
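The returned path is consumed elsewhere in the surrounding test. A minimal sketch of how the file written here could be read back and its cells counted, assuming an HFileScanner with caching disabled (the actual caller is not shown in this excerpt):

// Hypothetical reader-side check; the scanner flags and assertions are assumptions.
Path p = writeStoreFile();
HFile.Reader reader = HFile.createReader(fs, p, cacheConf, conf);
HFileScanner scanner = reader.getScanner(false, false); // no block caching, seek+read
assertTrue(scanner.seekTo());                           // position on the first cell
int count = 1;
while (scanner.next()) {
  count++;
}
assertEquals(1000, count);                              // writeStoreFile() appended 1000 cells
reader.close();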
Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.
The class TestSeekBeforeWithInlineBlocks, method testMultiIndexLevelRandomHFileWithBlooms.
/**
 * Scanner.seekBefore() could fail because, when seeking to a previous HFile data block, it needs
 * to know the size of that block, which it calculates from the current data block offset and the
 * previous data block offset. This breaks when there are leaf-level index blocks in the scannable
 * section of the HFile, i.e. starting with HFile v2. This test tries seekBefore() on flat
 * (single-level) and multi-level (2- and 3-level) HFiles and confirms the bug is fixed. The same
 * bug also affected inline Bloom blocks, for the same reason.
 */
@Test
public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  // Try out different HFile versions to ensure reverse scan works on each version
  for (int hfileVersion = HFile.MIN_FORMAT_VERSION_WITH_TAGS;
      hfileVersion <= HFile.MAX_FORMAT_VERSION; hfileVersion++) {
    conf.setInt(HFile.FORMAT_VERSION_KEY, hfileVersion);
    fs = HFileSystem.get(conf);
    // Try out different bloom types because inline Bloom blocks break seekBefore()
    for (BloomType bloomType : BloomType.values()) {
      // Test out HFile block indices of various sizes/levels
      for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; testI++) {
        int indexBlockSize = INDEX_CHUNK_SIZES[testI];
        int expectedNumLevels = EXPECTED_NUM_LEVELS[testI];
        LOG.info(String.format("Testing HFileVersion: %s, BloomType: %s, Index Levels: %s",
            hfileVersion, bloomType, expectedNumLevels));
        conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, indexBlockSize);
        conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
        Cell[] cells = new Cell[NUM_KV];
        Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
            String.format("testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s",
                hfileVersion, bloomType, testI));
        // Disable caching to prevent it from hiding any bugs in block seeks/reads
        conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
        CacheConfig cacheConf = new CacheConfig(conf);

        // Write the HFile
        {
          HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build();
          StoreFileWriter storeFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
              .withFilePath(hfilePath)
              .withFileContext(meta)
              .withBloomType(bloomType)
              .build();
          for (int i = 0; i < NUM_KV; i++) {
            byte[] row = RandomKeyValueUtil.randomOrderedKey(RAND, i);
            byte[] qual = RandomKeyValueUtil.randomRowOrQualifier(RAND);
            byte[] value = RandomKeyValueUtil.randomValue(RAND);
            KeyValue kv = new KeyValue(row, FAM, qual, value);
            storeFileWriter.append(kv);
            cells[i] = kv;
          }
          storeFileWriter.close();
        }

        // Read the HFile
        HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, conf);

        // Sanity check the HFile index level
        assertEquals(expectedNumLevels, reader.getTrailer().getNumDataIndexLevels());

        // Check seekBefore() in both directions, with pread both enabled and disabled
        for (boolean pread : new boolean[] { false, true }) {
          HFileScanner scanner = reader.getScanner(true, pread);
          checkNoSeekBefore(cells, scanner, 0);
          for (int i = 1; i < NUM_KV; i++) {
            checkSeekBefore(cells, scanner, i);
            checkCell(cells[i - 1], scanner.getCell());
          }
          assertTrue(scanner.seekTo());
          for (int i = NUM_KV - 1; i >= 1; i--) {
            checkSeekBefore(cells, scanner, i);
            checkCell(cells[i - 1], scanner.getCell());
          }
          checkNoSeekBefore(cells, scanner, 0);
          scanner.close();
        }

        reader.close();
      }
    }
  }
}
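The checkSeekBefore(), checkNoSeekBefore(), and checkCell() helpers referenced above belong to the test class but are not included in this excerpt. Minimal sketches of what they might look like (the assertion messages and exact comparisons are assumptions):

// Hypothetical sketches; the real helpers may assert with different messages.
private void checkSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException {
  // seekBefore(cells[i]) must succeed because an earlier cell exists
  assertTrue("Failed to seek before cell " + i, scanner.seekBefore(cells[i]));
}

private void checkNoSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException {
  // Nothing precedes the first cell, so seekBefore must return false
  assertFalse("Unexpectedly seeked before cell " + i, scanner.seekBefore(cells[i]));
}

private void checkCell(Cell expected, Cell actual) {
  // After seekBefore(i) the scanner should be positioned on cell i - 1
  assertEquals(0, CellComparator.COMPARATOR.compare(expected, actual));
}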
Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.
The class TestPrefetch, method writeStoreFile.
private Path writeStoreFile() throws IOException {
  Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "TestPrefetch");
  HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build();
  StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(storeFileParentDir)
      .withComparator(CellComparator.COMPARATOR)
      .withFileContext(meta)
      .build();
  final int rowLen = 32;
  for (int i = 0; i < NUM_KV; ++i) {
    byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i);
    byte[] v = RandomKeyValueUtil.randomValue(RNG);
    int cfLen = RNG.nextInt(k.length - rowLen + 1);
    KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, RNG.nextLong(), generateKeyType(RNG), v, 0, v.length);
    sfw.append(kv);
  }
  sfw.close();
  return sfw.getPath();
}
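TestPrefetch writes this file so that the reader side can verify prefetch-on-open. A minimal sketch of that reader side, assuming CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY is enabled before the CacheConfig is built (the polling interval and surrounding structure are assumptions about the rest of the test):

// Hypothetical reader side, inside a test method that declares throws Exception.
conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
cacheConf = new CacheConfig(conf);
Path storeFilePath = writeStoreFile();
HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf);
while (!reader.prefetchComplete()) {
  // Prefetch runs on a background thread; wait until all blocks have been cached
  Thread.sleep(100);
}
reader.close();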
Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.
The class TestCachedMobFile, method testReadKeyValue.
@Test
public void testReadKeyValue() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  String caseName = getName();
  MobTestUtil.writeStoreFile(writer, caseName);
  CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);

  // Test the start key ("aa")
  byte[] startKey = Bytes.toBytes("aa");
  KeyValue expectedKey =
      new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  KeyValue seekKey = expectedKey.createKeyOnly(false);
  Cell cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);

  // Test the end key ("zz")
  byte[] endKey = Bytes.toBytes("zz");
  expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);

  // Test a random key
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);

  // Test a key smaller than the start key ("a1" < "aa"); the first cell is returned
  byte[] lowerKey = Bytes.toBytes("a1");
  expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
  cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);

  // Test a key larger than the end key ("z{" > "zz"); nothing is found
  byte[] upperKey = Bytes.toBytes("z{");
  seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  cell = cachedMobFile.readCell(seekKey, false);
  Assert.assertNull(cell);
}
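MobTestUtil.writeStoreFile(writer, caseName), used above, is what makes "aa" the first row and "zz" the last. A minimal sketch of such a helper, assuming it writes one cell per two-letter row key from "aa" to "zz" with caseName as both family and qualifier (the real utility may differ in detail):

// Hypothetical sketch of MobTestUtil.writeStoreFile(StoreFileWriter, String).
static void writeStoreFile(StoreFileWriter writer, String caseName) throws IOException {
  byte[] fam = Bytes.toBytes(caseName);
  byte[] qualifier = Bytes.toBytes(caseName);
  long now = System.currentTimeMillis();
  try {
    for (char d = 'a'; d <= 'z'; d++) {
      for (char e = 'a'; e <= 'z'; e++) {
        byte[] row = new byte[] { (byte) d, (byte) e };
        writer.append(new KeyValue(row, fam, qualifier, now, row));
      }
    }
  } finally {
    // Always close the writer so the trailer and file metadata are flushed
    writer.close();
  }
}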