Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
In the class MobUtils, the method createWriter:
/**
* Creates a writer for the mob file in a temp directory.
* @param conf The current configuration.
* @param fs The current file system.
* @param family The descriptor of the current column family.
* @param path The path for a temp directory.
* @param maxKeyCount The estimated maximum number of keys, used to size the bloom filter.
* @param compression The compression algorithm.
* @param cacheConfig The current cache config.
* @param cryptoContext The encryption context.
* @param checksumType The checksum type.
* @param bytesPerChecksum The bytes per checksum.
* @param blocksize The HFile block size.
* @param bloomType The bloom filter type.
* @param isCompaction If the writer is used in compaction.
* @return The writer for the mob file.
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
    HColumnDescriptor family, Path path, long maxKeyCount,
    Compression.Algorithm compression, CacheConfig cacheConfig,
    Encryption.Context cryptoContext, ChecksumType checksumType, int bytesPerChecksum,
    int blocksize, BloomType bloomType, boolean isCompaction) throws IOException {
  if (compression == null) {
    compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  final CacheConfig writerCacheConf;
  if (isCompaction) {
    writerCacheConf = new CacheConfig(cacheConfig);
    writerCacheConf.setCacheDataOnWrite(false);
  } else {
    writerCacheConf = cacheConfig;
  }
  HFileContext hFileContext = new HFileContextBuilder().withCompression(compression)
      .withIncludesMvcc(true).withIncludesTags(true)
      .withCompressTags(family.isCompressTags()).withChecksumType(checksumType)
      .withBytesPerCheckSum(bytesPerChecksum).withBlockSize(blocksize)
      .withHBaseCheckSum(true).withDataBlockEncoding(family.getDataBlockEncoding())
      .withEncryptionContext(cryptoContext)
      .withCreateTime(EnvironmentEdgeManager.currentTime()).build();
  StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs)
      .withFilePath(path).withComparator(CellComparator.COMPARATOR)
      .withBloomType(bloomType).withMaxKeyCount(maxKeyCount)
      .withFileContext(hFileContext).build();
  return w;
}
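As a point of reference, calling this helper from client code might look like the sketch below. It is a minimal, hypothetical usage example rather than code from the HBase source: the column family name, the temp path, the key-count estimate, and the cellsToWrite collection are placeholders, and the checksum, block size, and bloom settings are assumed defaults for this HBase version.

Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
HColumnDescriptor family = new HColumnDescriptor("f");   // placeholder column family
Path tempPath = new Path("/hbase/mobdir/.tmp/example");  // placeholder temp path
StoreFileWriter writer = MobUtils.createWriter(conf, fs, family, tempPath,
    1000L,                                   // rough key-count estimate
    Compression.Algorithm.NONE,              // no compression in this sketch
    new CacheConfig(conf),
    Encryption.Context.NONE,                 // no encryption
    ChecksumType.getDefaultChecksumType(),
    HFile.DEFAULT_BYTES_PER_CHECKSUM,
    HConstants.DEFAULT_BLOCKSIZE,
    BloomType.NONE,
    false);                                  // not a compaction writer
try {
  for (Cell cell : cellsToWrite) {           // cellsToWrite: pre-built cells in sort order
    writer.append(cell);
  }
} finally {
  writer.close();
}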
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
In the class TestHalfStoreFileReader, the method testHalfScanAndReseek:
/**
* Test the scanner and reseek of a half hfile scanner. The scanner API
* demands that seekTo and reseekTo() only return < 0 if the key lies
* before the start of the file (leaving the scanner with no position). They
* return 0 on a perfect match (rare), and > 0 on an imperfect match.
*
* The latter case being the most common, we should generally be returning 1,
* and if we do, there may or may not be a 'next' in the scanner/file.
*
* A bug in the half file scanner was returning -1 at the end of the bottom
* half, which caused the calling code above it to see a null position and hit
* NPEs and other problems. This test reproduces that failure, and also tests
* both the bottom and top of the file while we are at it.
*
* @throws IOException
*/
@Test
public void testHalfScanAndReseek() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, p).withFileContext(meta).create();
  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();
  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  Cell midKV = r.midkey();
  byte[] midkey = CellUtil.cloneRow(midKV);
  // System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));
  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);
  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);
  r.close();
}
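To make the return-code contract from the comment concrete, a sketch like the following could be run against the reader before it is closed. It reuses r and items from the test above purely for illustration and assumes the HFileScanner API of this HBase version (getScanner(cacheBlocks, pread), seekTo(Cell) returning -1, 0, or 1); it is not part of the test itself.

HFileScanner scanner = r.getScanner(false, false);
// Seeking to a key that is actually stored: expect 0, scanner positioned on that cell.
int ret = scanner.seekTo(items.get(0));
assertEquals(0, ret);
// Seeking to a key that sorts before the first cell in the file would return -1 and
// leave the scanner unpositioned; seeking to a key that falls between two stored cells
// returns 1, with the scanner left on the preceding cell (a 'next' may or may not exist).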
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
In the class TestDataBlockEncoders, the method testEncodersOnDataset:
private void testEncodersOnDataset(List<KeyValue> kvList, boolean includesMemstoreTS,
    boolean includesTags) throws IOException {
  ByteBuffer unencodedDataBuf =
      RedundantKVGenerator.convertKvToByteBuffer(kvList, includesMemstoreTS);
  HFileContext fileContext = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTags).build();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    DataBlockEncoder encoder = encoding.getEncoder();
    if (encoder == null) {
      continue;
    }
    HFileBlockEncodingContext encodingContext =
        new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, fileContext);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    baos.write(HFILEBLOCK_DUMMY_HEADER);
    DataOutputStream dos = new DataOutputStream(baos);
    encoder.startBlockEncoding(encodingContext, dos);
    for (KeyValue kv : kvList) {
      encoder.encode(kv, encodingContext, dos);
    }
    encoder.endBlockEncoding(encodingContext, dos, baos.getBuffer());
    byte[] encodedData = baos.toByteArray();
    testAlgorithm(encodedData, unencodedDataBuf, encoder);
  }
}
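For completeness, the encoded block produced above can be decoded back through the same DataBlockEncoder. The sketch below is an assumption-laden illustration rather than the body of testAlgorithm: it presumes the encoded payload starts after the dummy block header plus the 2-byte encoding id written at the start of block encoding.

int encodedDataOffset = HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE;
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(
    encodedData, encodedDataOffset, encodedData.length - encodedDataOffset));
// Decode using a decoding context built from the same HFileContext used for encoding.
ByteBuffer decoded =
    encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(fileContext));
// 'decoded' should now hold the same serialized KeyValues as unencodedDataBuf.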
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
In the class TestPrefixTreeEncoding, the method testScanWithRandomData:
@Test
public void testScanWithRandomData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(false).withIncludesTags(includesTag)
      .withCompression(Algorithm.NONE).build();
  HFileBlockEncodingContext blkEncodingCtx =
      new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  generateRandomTestData(kvset, numBatchesWritten++, includesTag, encoder, blkEncodingCtx,
      userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  seeker.setCurrentBuffer(new SingleByteBuff(readBuffer));
  Cell previousKV = null;
  do {
    Cell currentKV = seeker.getCell();
    System.out.println(currentKV);
    if (previousKV != null && CellComparator.COMPARATOR.compare(currentKV, previousKV) < 0) {
      dumpInputKVSet();
      fail("Current kv " + currentKV + " is smaller than previous keyvalue " + previousKV);
    }
    if (!includesTag) {
      assertFalse(currentKV.getTagsLength() > 0);
    } else {
      Assert.assertTrue(currentKV.getTagsLength() > 0);
    }
    previousKV = currentKV;
  } while (seeker.next());
}
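Beyond the linear getCell()/next() walk shown above, an EncodedSeeker also supports point seeks. The following is a small illustrative sketch, reusing seeker and kvset from the test, under the assumption that seekToKeyInBlock(Cell, boolean) returns 0 on an exact match in this HBase version.

for (Cell expected : kvset) {
  int result = seeker.seekToKeyInBlock(expected, false /* seekBefore */);
  if (result == 0) {
    // Exact match: the seeker is now positioned on the matching cell.
    assertEquals(0, CellComparator.COMPARATOR.compare(expected, seeker.getCell()));
  }
}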
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
In the class TestPrefixTreeEncoding, the method testSeekWithFixedData:
@Test
public void testSeekWithFixedData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(false).withIncludesTags(includesTag)
      .withCompression(Algorithm.NONE).build();
  HFileBlockEncodingContext blkEncodingCtx =
      new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  generateFixedTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
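A brief note on the wrap offset used in both tests: the first DataBlockEncoding.ID_SIZE bytes of the stream carry the data-block-encoding id, which is why the payload buffer starts past them. As a small sketch (an assumption about the on-disk layout in this version, not code from the test), the id could be read back and mapped to its enum constant:

short encodingId = Bytes.toShort(onDiskBytes, 0);
DataBlockEncoding usedEncoding = DataBlockEncoding.getEncodingById(encodingId);
// Expected to resolve to DataBlockEncoding.PREFIX_TREE for the block written above.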