Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestCachedMobFile, method testReadKeyValue.
@Test
public void testReadKeyValue() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir).withFileContext(meta).build();
  String caseName = getName();
  MobTestUtil.writeStoreFile(writer, caseName);
  CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);
  // Test the start key
  byte[] startKey = Bytes.toBytes("aa"); // The start key bytes
  KeyValue expectedKey =
      new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  KeyValue seekKey = expectedKey.createKeyOnly(false);
  Cell cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test the end key
  byte[] endKey = Bytes.toBytes("zz"); // The end key bytes
  expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a random key
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a key which is less than the start key
  byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
  expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
  cell = cachedMobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a key which is greater than the end key
  byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
  seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  cell = cachedMobFile.readCell(seekKey, false);
  Assert.assertNull(cell);
}
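The builder in this test sets only the block size and takes defaults for everything else. For reference, a minimal sketch of a more fully configured HFileContext; the compression, encoding, and MVCC/tags settings below are illustrative assumptions, not values used by the test above.

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class HFileContextSketch {
  // All values below are illustrative; the test above only calls withBlockSize.
  public static HFileContext exampleContext() {
    return new HFileContextBuilder()
        .withBlockSize(8 * 1024)                            // data block size in bytes
        .withCompression(Compression.Algorithm.GZ)          // block compression codec
        .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF) // on-disk key encoding
        .withIncludesMvcc(true)                             // keep MVCC read points
        .withIncludesTags(false)                            // no per-cell tags
        .build();
  }
}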
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestMobFile, method testReadKeyValue.
@Test
public void testReadKeyValue() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir).withFileContext(meta).build();
  String caseName = getName();
  MobTestUtil.writeStoreFile(writer, caseName);
  MobFile mobFile =
      new MobFile(new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE));
  byte[] family = Bytes.toBytes(caseName);
  byte[] qualify = Bytes.toBytes(caseName);
  // Test the start key
  byte[] startKey = Bytes.toBytes("aa"); // The start key bytes
  KeyValue expectedKey =
      new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  KeyValue seekKey = expectedKey.createKeyOnly(false);
  Cell cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test the end key
  byte[] endKey = Bytes.toBytes("zz"); // The end key bytes
  expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a random key
  byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
  expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
  seekKey = expectedKey.createKeyOnly(false);
  cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a key which is less than the start key
  byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa"
  expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
  seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
  cell = mobFile.readCell(seekKey, false);
  MobTestUtil.assertCellEquals(expectedKey, cell);
  // Test a key which is greater than the end key
  byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz"
  seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
  cell = mobFile.readCell(seekKey, false);
  assertNull(cell);
}
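MobTestUtil.assertCellEquals does the comparisons in both tests above. Its implementation is not shown here; a plausible sketch, comparing the logical fields of the two cells rather than their serialized bytes, might look like this (the real helper may differ in detail):

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

public final class CellAssertSketch {
  private CellAssertSketch() {
  }

  // Sketch only: compares row, family, qualifier, value, and type byte.
  public static void assertCellEquals(Cell expected, Cell actual) {
    assertArrayEquals(CellUtil.cloneRow(expected), CellUtil.cloneRow(actual));
    assertArrayEquals(CellUtil.cloneFamily(expected), CellUtil.cloneFamily(actual));
    assertArrayEquals(CellUtil.cloneQualifier(expected), CellUtil.cloneQualifier(actual));
    assertArrayEquals(CellUtil.cloneValue(expected), CellUtil.cloneValue(actual));
    assertEquals(expected.getTypeByte(), actual.getTypeByte());
  }
}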
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestMobFile, method testGetScanner.
@Test
public void testGetScanner() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir).withFileContext(meta).build();
  MobTestUtil.writeStoreFile(writer, getName());
  MobFile mobFile =
      new MobFile(new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE));
  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
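The scanner returned by MobFile.getScanner() is a StoreFileScanner, i.e. an ordinary KeyValueScanner, so it can be consumed with the usual seek-then-next loop. A minimal sketch, assuming a MobFile opened the same way as in the test above; the row "aa" is borrowed from the key range the other tests write:

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.mob.MobFile;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class MobScanSketch {
  // Drain every cell at or after row "aa" from an already opened MobFile.
  static void drainFrom(MobFile mobFile) throws IOException {
    KeyValueScanner scanner = mobFile.getScanner();
    try {
      scanner.seek(KeyValueUtil.createFirstOnRow(Bytes.toBytes("aa")));
      for (Cell cell = scanner.next(); cell != null; cell = scanner.next()) {
        // process the cell
      }
    } finally {
      scanner.close();
    }
  }
}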
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class CreateRandomStoreFile, method run.
/**
 * Runs the tool.
 *
 * @param args command-line arguments
 * @return true in case of success
 * @throws IOException
 */
public boolean run(String[] args) throws IOException {
  options.addOption(OUTPUT_DIR_OPTION, "output_dir", true, "Output directory");
  options.addOption(NUM_KV_OPTION, "num_kv", true, "Number of key/value pairs");
  options.addOption(KEY_SIZE_OPTION, "key_size", true, "Average key size");
  options.addOption(VALUE_SIZE_OPTION, "value_size", true, "Average value size");
  options.addOption(HFILE_VERSION_OPTION, "hfile_version", true, "HFile version to create");
  options.addOption(COMPRESSION_OPTION, "compression", true,
      "Compression type, one of " + Arrays.toString(Compression.Algorithm.values()));
  options.addOption(BLOOM_FILTER_OPTION, "bloom_filter", true,
      "Bloom filter type, one of " + Arrays.toString(BloomType.values()));
  options.addOption(BLOCK_SIZE_OPTION, "block_size", true, "HFile block size");
  options.addOption(BLOOM_BLOCK_SIZE_OPTION, "bloom_block_size", true,
      "Compound Bloom filter block size");
  options.addOption(INDEX_BLOCK_SIZE_OPTION, "index_block_size", true, "Index block size");
  if (args.length == 0) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp(CreateRandomStoreFile.class.getSimpleName(), options, true);
    return false;
  }
  CommandLineParser parser = new PosixParser();
  CommandLine cmdLine;
  try {
    cmdLine = parser.parse(options, args);
  } catch (ParseException ex) {
    LOG.error(ex);
    return false;
  }
  if (!cmdLine.hasOption(OUTPUT_DIR_OPTION)) {
    LOG.error("Output directory is not specified");
    return false;
  }
  if (!cmdLine.hasOption(NUM_KV_OPTION)) {
    LOG.error("The number of key/value pairs is not specified");
    return false;
  }
  if (!cmdLine.hasOption(KEY_SIZE_OPTION)) {
    LOG.error("Key size is not specified");
    return false;
  }
  if (!cmdLine.hasOption(VALUE_SIZE_OPTION)) {
    LOG.error("Value size is not specified");
    return false;
  }
  Configuration conf = HBaseConfiguration.create();
  Path outputDir = new Path(cmdLine.getOptionValue(OUTPUT_DIR_OPTION));
  long numKV = Long.parseLong(cmdLine.getOptionValue(NUM_KV_OPTION));
  configureKeyValue(numKV, Integer.parseInt(cmdLine.getOptionValue(KEY_SIZE_OPTION)),
      Integer.parseInt(cmdLine.getOptionValue(VALUE_SIZE_OPTION)));
  FileSystem fs = FileSystem.get(conf);
  Compression.Algorithm compr = Compression.Algorithm.NONE;
  if (cmdLine.hasOption(COMPRESSION_OPTION)) {
    compr = Compression.Algorithm.valueOf(cmdLine.getOptionValue(COMPRESSION_OPTION));
  }
  BloomType bloomType = BloomType.NONE;
  if (cmdLine.hasOption(BLOOM_FILTER_OPTION)) {
    bloomType = BloomType.valueOf(cmdLine.getOptionValue(BLOOM_FILTER_OPTION));
  }
  int blockSize = HConstants.DEFAULT_BLOCKSIZE;
  if (cmdLine.hasOption(BLOCK_SIZE_OPTION)) {
    blockSize = Integer.parseInt(cmdLine.getOptionValue(BLOCK_SIZE_OPTION));
  }
  if (cmdLine.hasOption(BLOOM_BLOCK_SIZE_OPTION)) {
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
        Integer.parseInt(cmdLine.getOptionValue(BLOOM_BLOCK_SIZE_OPTION)));
  }
  if (cmdLine.hasOption(INDEX_BLOCK_SIZE_OPTION)) {
    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
        Integer.parseInt(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION)));
  }
  HFileContext meta =
      new HFileContextBuilder().withCompression(compr).withBlockSize(blockSize).build();
  StoreFileWriter sfw = new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs)
      .withOutputDir(outputDir).withBloomType(bloomType).withMaxKeyCount(numKV)
      .withFileContext(meta).build();
  rand = new Random();
  LOG.info("Writing " + numKV + " key/value pairs");
  for (long i = 0; i < numKV; ++i) {
    sfw.append(generateKeyValue(i));
  }
  int numMetaBlocks = rand.nextInt(10) + 1;
  LOG.info("Writing " + numMetaBlocks + " meta blocks");
  for (int metaI = 0; metaI < numMetaBlocks; ++metaI) {
    sfw.getHFileWriter().appendMetaBlock(generateString(), new BytesWritable(generateValue()));
  }
  sfw.close();
  Path storeFilePath = sfw.getPath();
  long fileSize = fs.getFileStatus(storeFilePath).getLen();
  LOG.info("Created " + storeFilePath + ", " + fileSize + " bytes");
  return true;
}
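Put together, a run needs at least the output directory, the pair count, and the key and value sizes. A hedged sketch of a programmatic invocation; the option values are arbitrary examples, the long option names simply mirror the addOption calls above, and a public no-arg constructor on CreateRandomStoreFile is assumed:

public class CreateRandomStoreFileSketch {
  public static void main(String[] argv) throws java.io.IOException {
    // Arbitrary example values; the flags mirror the addOption calls above.
    String[] args = {
        "--output_dir", "/tmp/random-storefile",
        "--num_kv", "100000",
        "--key_size", "32",
        "--value_size", "1024",
        "--compression", "NONE",
        "--bloom_filter", "ROW"
    };
    boolean ok = new CreateRandomStoreFile().run(args);
    System.exit(ok ? 0 : 1);
  }
}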
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestBulkLoad, method createHFileForFamilies.
private String createHFileForFamilies(byte[] family) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  // TODO We need a way to do this without creating files
  File hFileLocation = testFolder.newFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(randomBytes, family, randomBytes, 0L,
          KeyValue.Type.Put.getCode(), randomBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
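One detail worth flagging: this snippet builds its context with the bare HFileContext constructor, which leaves every setting at its default, whereas the other snippets go through HFileContextBuilder. The builder form is equivalent here and is the more common idiom; the two statements at the top of the outer try block could just as well read:

// Equivalent to new HFileContext(): every setting is left at its default.
hFileFactory.withOutputStream(out);
hFileFactory.withFileContext(new HFileContextBuilder().build());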