Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestPrefixTreeEncoding, method testSeekWithRandomData:
@Test
public void testSeekWithRandomData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(false)
      .withIncludesTags(includesTag)
      .withCompression(Algorithm.NONE)
      .build();
  HFileBlockEncodingContext blkEncodingCtx =
      new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  generateRandomTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker =
      encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
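The ByteBuffer.wrap call above skips DataBlockEncoding.ID_SIZE bytes because the stream written through the encoding context is prefixed with the encoding's two-byte identifier; only the bytes after that header are encoded cells. A minimal sketch of that framing, assuming the same layout (the class and method names below are made up for illustration):

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

final class EncodedBlockFraming {
  /**
   * Strip the two-byte encoding id that prefixes an encoded block, returning
   * a view over the encoded cells only. Mirrors the wrap() arithmetic in the
   * test above.
   */
  static ByteBuffer stripEncodingId(byte[] onDisk) {
    // The first ID_SIZE (2) bytes hold the big-endian encoding id.
    short id = ByteBuffer.wrap(onDisk).getShort();
    if (id != DataBlockEncoding.PREFIX_TREE.getId()) {
      throw new IllegalArgumentException("unexpected encoding id: " + id);
    }
    return ByteBuffer.wrap(onDisk, DataBlockEncoding.ID_SIZE,
        onDisk.length - DataBlockEncoding.ID_SIZE);
  }
}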
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class HFileTestUtil, method createHFile:
/**
 * Create an HFile with the given number of rows between a given
 * start key and end key @ family:qualifier.
 * If withTag is true, we add the rowKey as the tag value for
 * tagtype MOB_TABLE_NAME_TAG_TYPE
 */
public static void createHFile(Configuration configuration, FileSystem fs, Path path,
    DataBlockEncoding encoding, byte[] family, byte[] qualifier, byte[] startKey,
    byte[] endKey, int numRows, boolean withTag) throws IOException {
  HFileContext meta = new HFileContextBuilder()
      .withIncludesTags(withTag)
      .withDataBlockEncoding(encoding)
      .build();
  HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
      .withPath(fs, path)
      .withFileContext(meta)
      .create();
  long now = System.currentTimeMillis();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
      Cell kv = new KeyValue(key, family, qualifier, now, key);
      if (withTag) {
        // add a tag. Arbitrarily chose mob tag since we have a helper already.
        Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, key);
        kv = MobUtils.createMobRefCell(kv, key, tableNameTag);
        // verify that the kv has the tag.
        Tag t = CellUtil.getTag(kv, TagType.MOB_TABLE_NAME_TAG_TYPE);
        if (t == null) {
          throw new IllegalStateException("Tag didn't stick to KV " + kv.toString());
        }
      }
      writer.append(kv);
    }
  } finally {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
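A hypothetical invocation of this helper, for illustration only; the path, family, qualifier, and key range below are made-up values matching the signature shown above, not values from the HBase test suite:

// Hypothetical usage of createHFile; all literal values are illustrative.
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
HFileTestUtil.createHFile(conf, fs,
    new Path("/tmp/example-hfile"),   // destination path (made up)
    DataBlockEncoding.NONE,           // no block encoding
    Bytes.toBytes("f"),               // column family
    Bytes.toBytes("q"),               // qualifier
    Bytes.toBytes("aaa"),             // start key
    Bytes.toBytes("zzz"),             // end key
    1000,                             // number of rows
    false);                           // withTag: no tags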
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestStore, method addStoreFile:
private void addStoreFile() throws IOException {
  StoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId();
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
      .withOutputDir(storedir)
      .withFileContext(fileContext)
      .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file:" + w.getPath());
}
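Here appendMetadata(maxSequenceId, majorCompaction) stamps the file with the sequence id and compaction flag the store consults when picking it up. A minimal self-contained variant of the writer pattern above; the directory and the 4 KB block size are illustrative assumptions, not values from TestStore:

// Sketch of the StoreFileWriter pattern; paths and sizes are illustrative.
Configuration c = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(c);
HFileContext ctx = new HFileContextBuilder().withBlockSize(4 * 1024).build();
StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
    .withOutputDir(new Path("/tmp/example-store"))  // writer picks a unique file name inside
    .withFileContext(ctx)
    .build();
w.appendMetadata(1L, false);  // max sequence id; not from a major compaction
w.close();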
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestStoreFile, method writeStoreFile:
private StoreFileWriter writeStoreFile(Configuration conf, CacheConfig cacheConf, Path path,
    int numBlocks) throws IOException {
  // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
  int numKVs = 5 * numBlocks;
  List<KeyValue> kvs = new ArrayList<>(numKVs);
  byte[] b = Bytes.toBytes("x");
  int totalSize = 0;
  for (int i = numKVs; i > 0; i--) {
    KeyValue kv = new KeyValue(b, b, b, i, b);
    kvs.add(kv);
    // kv has memstoreTS 0, which takes 1 byte to store.
    totalSize += kv.getLength() + 1;
  }
  int blockSize = totalSize / numBlocks;
  HFileContext meta = new HFileContextBuilder()
      .withBlockSize(blockSize)
      .withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES)
      .build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(path)
      .withMaxKeyCount(2000)
      .withFileContext(meta)
      .build();
  // We'll write N-1 KVs to ensure we don't write an extra block
  kvs.remove(kvs.size() - 1);
  for (KeyValue kv : kvs) {
    writer.append(kv);
  }
  writer.appendMetadata(0, false);
  writer.close();
  return writer;
}
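CKTYPE and CKBYTES are constants defined elsewhere in TestStoreFile; they pick the checksum algorithm and how many bytes each checksum covers. A hedged sketch of those knobs with concrete stand-in values (CRC32C and 512 bytes are illustrative assumptions, not the test's actual constants):

import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.ChecksumType;

// Illustrative stand-ins for TestStoreFile's CKTYPE/CKBYTES constants.
HFileContext meta = new HFileContextBuilder()
    .withBlockSize(16 * 1024)               // target uncompressed block size
    .withChecksumType(ChecksumType.CRC32C)  // checksum algorithm per chunk
    .withBytesPerCheckSum(512)              // one checksum per 512 bytes
    .build();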
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestStoreFile, method testReseek:
/**
 * Test for HBASE-8012
 */
@Test
public void testReseek() throws Exception {
  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(f)
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);
  writer.close();
  StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
  // Now do reseek with empty KV to position to the beginning of the file
  KeyValue k = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
  StoreFileScanner s = getStoreFileScanner(reader, false, false);
  s.reseek(k);
  assertNotNull("Initial reseek should position at the beginning of the file", s.peek());
}
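Because createFirstOnRow on an empty row yields the smallest possible key, the reseek lands on the first cell of the file. A sketch of how a caller might continue from that position, walking the scanner to the end; the loop is illustrative and assumes the Cell-returning KeyValueScanner API of the same HBase version:

// Illustrative continuation: iterate cells from the reseek position.
for (Cell c = s.next(); c != null; c = s.next()) {
  // ...assertions against c would go here...
}
s.close();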