Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestCompoundBloomFilter, the method writeStoreFile:
private Path writeStoreFile(int t, BloomType bt, List<KeyValue> kvs) throws IOException {
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZES[t]);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
  cacheConf = new CacheConfig(conf, blockCache);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
  StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(TEST_UTIL.getDataTestDir())
      .withBloomType(bt)
      .withFileContext(meta)
      .build();
  assertTrue(w.hasGeneralBloom());
  assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter);
  CompoundBloomFilterWriter cbbf = (CompoundBloomFilterWriter) w.getGeneralBloomWriter();
  int keyCount = 0;
  KeyValue prev = null;
  LOG.debug("Total keys/values to insert: " + kvs.size());
  for (KeyValue kv : kvs) {
    w.append(kv);
    // Validate the key count in the Bloom filter.
    boolean newKey = true;
    if (prev != null) {
      newKey = !(bt == BloomType.ROW ? CellUtil.matchingRows(kv, prev)
          : CellUtil.matchingRowColumn(kv, prev));
    }
    if (newKey) {
      ++keyCount;
    }
    assertEquals(keyCount, cbbf.getKeyCount());
    prev = kv;
  }
  w.close();
  return w.getPath();
}
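For context, HFileContextBuilder exposes several other knobs besides the block size. A minimal sketch of a richer configuration follows; the option values are illustrative only and are not taken from this test.
HFileContext richerMeta = new HFileContextBuilder()
    .withBlockSize(BLOCK_SIZES[t])                  // data block size in bytes
    .withCompression(Compression.Algorithm.NONE)    // per-block compression codec
    .withDataBlockEncoding(DataBlockEncoding.NONE)  // key encoding inside data blocks
    .build();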
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestScannerWithBulkload, the method writeToHFile:
// If nativeHFile is true, we will set cell seq id and MAX_SEQ_ID_KEY in the file.
// Else, we will set BULKLOAD_TIME_KEY.
private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nativeHFile)
    throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  final Path hfilePath = new Path(hFilePath);
  fs.mkdirs(hfilePath);
  Path path = new Path(pathStr);
  HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  Assert.assertNotNull(wf);
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
      Bytes.toBytes("version2"));
  // Set cell seq id to test bulk load native hfiles.
  if (nativeHFile) {
    // Set a big seq id. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip
    // this kv.
    kv.setSequenceId(9999999);
  }
  writer.append(kv);
  if (nativeHFile) {
    // Set a big MAX_SEQ_ID_KEY. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip its
    // kv.
    writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(9999999L));
  } else {
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
  }
  writer.close();
  return hfilePath;
}
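The HFile written here is meant to be bulk loaded into a table afterwards. A hedged sketch of what that load could look like with the HBase 2.x BulkLoadHFiles tool follows; the table name and directory layout are assumptions, not taken from this test.
// Assumes bulkDir contains one subdirectory per column family holding the HFiles,
// e.g. /temp/testBulkload/col/<hfile>, and that the target table already exists.
Path bulkDir = new Path("/temp/testBulkload");
BulkLoadHFiles.create(TEST_UTIL.getConfiguration())
    .bulkLoad(TableName.valueOf("testBulkload"), bulkDir);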
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestStoreScannerClosure, the method writeStoreFile:
private Path writeStoreFile() throws IOException {
  Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "TestHFile");
  HFileContext meta = new HFileContextBuilder().withBlockSize(64 * 1024).build();
  StoreFileWriter sfw = new StoreFileWriter.Builder(CONF, fs)
      .withOutputDir(storeFileParentDir)
      .withFileContext(meta)
      .build();
  final int rowLen = 32;
  Random RNG = new Random();
  for (int i = 0; i < 1000; ++i) {
    byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i);
    byte[] v = RandomKeyValueUtil.randomValue(RNG);
    int cfLen = RNG.nextInt(k.length - rowLen + 1);
    KeyValue kv = new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
        k.length - rowLen - cfLen, RNG.nextLong(), generateKeyType(RNG), v, 0, v.length);
    sfw.append(kv);
  }
  sfw.close();
  return sfw.getPath();
}
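The generateKeyType helper referenced above is defined elsewhere in the test class. A minimal stand-in only needs to return a valid KeyValue.Type for each generated cell; the version below is an assumption for illustration, not the project's exact implementation.
// Assumed stand-in: mostly Puts, with the occasional delete marker mixed in.
private static KeyValue.Type generateKeyType(Random rand) {
  return rand.nextBoolean() ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn;
}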
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestSecureBulkloadListener, the method createHFileForFamilies:
private String createHFileForFamilies(byte[] family) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  Path testDir = new Path(dfs.getWorkingDirectory(),
      new Path(name.getMethodName(), Bytes.toString(family)));
  if (!dfs.exists(testDir)) {
    dfs.mkdirs(testDir);
  }
  Path hfilePath = new Path(testDir, generateUniqueName(null));
  FSDataOutputStream out = dfs.createFile(hfilePath).build();
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContextBuilder().build());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
          .setRow(randomBytes)
          .setFamily(family)
          .setQualifier(randomBytes)
          .setTimestamp(0L)
          .setType(KeyValue.Type.Put.getCode())
          .setValue(randomBytes)
          .build()));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hfilePath.toString();
}
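randomBytes and generateUniqueName are fixtures defined elsewhere in the test class. Plausible shapes for them, assumed here purely for illustration, would be:
// Assumed fixtures, for illustration only.
private final byte[] randomBytes = new byte[100]; // filled once via new Random().nextBytes(randomBytes)

private static String generateUniqueName(String suffix) {
  String name = UUID.randomUUID().toString().replaceAll("-", "");
  return suffix == null ? name : name + suffix;
}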
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestStoreFileScannerWithTagCompression, the method testReseek:
@Test
public void testReseek() throws Exception {
  // Write the store file.
  Path f = new Path(ROOT_DIR, "testReseek");
  HFileContext meta = new HFileContextBuilder()
      .withBlockSize(8 * 1024)
      .withIncludesTags(true)
      .withCompressTags(true)
      .withDataBlockEncoding(DataBlockEncoding.PREFIX)
      .build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withFilePath(f)
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);
  writer.close();
  ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  StoreFileReader reader =
      new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
  fileInfo.initMetaAndIndex(reader.getHFileReader());
  StoreFileScanner s = reader.getStoreFileScanner(false, false, false, 0, 0, false);
  try {
    // Reseek to the first possible cell on row "k2", then step forward through the file.
    KeyValue k = KeyValueUtil.createFirstOnRow(Bytes.toBytes("k2"));
    s.reseek(k);
    Cell kv = s.next();
    kv = s.next();
    kv = s.next();
    byte[] key5 = Bytes.toBytes("k5");
    assertTrue(Bytes.equals(key5, 0, key5.length, kv.getRowArray(), kv.getRowOffset(),
        kv.getRowLength()));
    List<Tag> tags = PrivateCellUtil.getTags(kv);
    assertEquals(1, tags.size());
    assertEquals("tag3", Bytes.toString(Tag.cloneValue(tags.get(0))));
  } finally {
    s.close();
  }
}
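The writeStoreFile(writer) helper used above is defined elsewhere in the test class. For the assertions to hold, it must append a few tagged rows such that the cell reached three next() calls after reseeking to row "k2" sits on row "k5" and carries a single tag valued "tag3". A plausible shape, assumed for illustration only:
// Assumed shape of the helper; the real test's rows, values and tag types may differ.
private void writeStoreFile(StoreFileWriter writer) throws IOException {
  byte[] fam = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("q");
  long now = EnvironmentEdgeManager.currentTime();
  writer.append(new KeyValue(Bytes.toBytes("k1"), fam, qualifier, now, Bytes.toBytes("v1"),
      new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("tag1")) }));
  writer.append(new KeyValue(Bytes.toBytes("k3"), fam, qualifier, now, Bytes.toBytes("v3"),
      new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("tag2")) }));
  writer.append(new KeyValue(Bytes.toBytes("k4"), fam, qualifier, now, Bytes.toBytes("v4"),
      new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("tag2")) }));
  writer.append(new KeyValue(Bytes.toBytes("k5"), fam, qualifier, now, Bytes.toBytes("v5"),
      new Tag[] { new ArrayBackedTag((byte) 1, Bytes.toBytes("tag3")) }));
}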