Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
Class LoadIncrementalHFiles, method copyHFileHalf.
/**
 * Copy half of an HFile into a new HFile.
 */
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Reference reference,
    HColumnDescriptor familyDescriptor) throws IOException {
  FileSystem fs = inFile.getFileSystem(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HalfStoreFileReader halfReader = null;
  StoreFileWriter halfWriter = null;
  try {
    // Open the source HFile through the Reference so only the referenced half is visible.
    halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf);
    Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
    int blocksize = familyDescriptor.getBlocksize();
    Algorithm compression = familyDescriptor.getCompressionType();
    BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
    // Mirror the column family's storage settings in the output HFile.
    HFileContext hFileContext = new HFileContextBuilder()
        .withCompression(compression)
        .withChecksumType(HStore.getChecksumType(conf))
        .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
        .withBlockSize(blocksize)
        .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
        .withIncludesTags(true)
        .build();
    halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFilePath(outFile)
        .withBloomType(bloomFilterType)
        .withFileContext(hFileContext)
        .build();
    // Copy every cell from the half reader into the new writer.
    HFileScanner scanner = halfReader.getScanner(false, false, false);
    scanner.seekTo();
    do {
      halfWriter.append(scanner.getCell());
    } while (scanner.next());
    // Carry over the file-info metadata entries that still apply to the copy.
    for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
      if (shouldCopyHFileMetaKey(entry.getKey())) {
        halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
      }
    }
  } finally {
    if (halfWriter != null) {
      halfWriter.close();
    }
    if (halfReader != null) {
      halfReader.close(cacheConf.shouldEvictOnClose());
    }
  }
}
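For reference, here is a minimal sketch that isolates the HFileContextBuilder chain above into a standalone helper deriving an HFileContext from a column family descriptor. The class and method names (FamilyContexts, buildContextForFamily) are invented for illustration and are not part of HBase; the builder calls themselves are the ones used in copyHFileHalf.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.HStore;

// Hypothetical helper: derives an HFileContext from a column family descriptor,
// mirroring the builder chain used in copyHFileHalf above.
final class FamilyContexts {
  static HFileContext buildContextForFamily(Configuration conf, HColumnDescriptor family) {
    return new HFileContextBuilder()
        .withCompression(family.getCompressionType())           // keep the family's compression codec
        .withChecksumType(HStore.getChecksumType(conf))         // checksum settings come from the cluster config
        .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
        .withBlockSize(family.getBlocksize())                   // HFile block size from the family
        .withDataBlockEncoding(family.getDataBlockEncoding())   // on-disk block encoding from the family
        .withIncludesTags(true)                                 // preserve cell tags, as the split copy does
        .build();
  }
}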
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
Class BoundedRecoveredHFilesOutputSink, method createRecoveredHFileWriter.
/**
 * @return Returns a base HFile without compressions or encodings; good enough for recovery
 *         given hfile has metadata on how it was written.
 */
private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String regionName, long seqId,
    String familyName, boolean isMetaTable) throws IOException {
  Path outputDir = WALSplitUtil.tryCreateRecoveredHFilesDir(walSplitter.rootFS, walSplitter.conf, tableName,
      regionName, familyName);
  StoreFileWriter.Builder writerBuilder = new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED,
      walSplitter.rootFS).withOutputDir(outputDir);
  HFileContext hFileContext = new HFileContextBuilder()
      .withChecksumType(StoreUtils.getChecksumType(walSplitter.conf))
      .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf))
      .withCellComparator(isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)
      .build();
  return writerBuilder.withFileContext(hFileContext).build();
}
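As a hedged usage sketch (not the actual BoundedRecoveredHFilesOutputSink logic), a writer created this way is driven by appending cells in sorted order and then closing it. The recoveredCells variable is assumed for illustration and the snippet reuses the imports of the surrounding class.

// Hypothetical caller inside the sink class; recoveredCells is assumed to be a
// collection of Cells reconstructed from WAL edits for one column family,
// already sorted by the table's comparator.
StoreFileWriter writer = createRecoveredHFileWriter(tableName, regionName, seqId, familyName, isMetaTable);
try {
  for (Cell cell : recoveredCells) {
    writer.append(cell);          // cells must arrive in comparator order
  }
} finally {
  writer.close();                 // finalizes the HFile trailer and file info
}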
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
Class TestDataBlockEncoders, method testSeekingOnSample.
/**
 * Test seeking while file is encoded.
 */
@Test
public void testSeekingOnSample() throws IOException {
  List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);
  // create all seekers
  List<DataBlockEncoder.EncodedSeeker> encodedSeekers = new ArrayList<>();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    LOG.info("Encoding: " + encoding);
    DataBlockEncoder encoder = encoding.getEncoder();
    if (encoder == null) {
      continue;
    }
    LOG.info("Encoder: " + encoder);
    ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
        getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData);
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTags)
        .withCompression(Compression.Algorithm.NONE)
        .build();
    DataBlockEncoder.EncodedSeeker seeker =
        encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
    encodedSeekers.add(seeker);
  }
  LOG.info("Testing it!");
  // try a few random seeks
  for (boolean seekBefore : new boolean[] { false, true }) {
    for (int i = 0; i < NUM_RANDOM_SEEKS; ++i) {
      int keyValueId;
      if (!seekBefore) {
        keyValueId = randomizer.nextInt(sampleKv.size());
      } else {
        keyValueId = randomizer.nextInt(sampleKv.size() - 1) + 1;
      }
      KeyValue keyValue = sampleKv.get(keyValueId);
      checkSeekingConsistency(encodedSeekers, seekBefore, keyValue);
    }
  }
  // check edge cases
  LOG.info("Checking edge cases");
  checkSeekingConsistency(encodedSeekers, false, sampleKv.get(0));
  for (boolean seekBefore : new boolean[] { false, true }) {
    checkSeekingConsistency(encodedSeekers, seekBefore, sampleKv.get(sampleKv.size() - 1));
    KeyValue midKv = sampleKv.get(sampleKv.size() / 2);
    Cell lastMidKv = PrivateCellUtil.createLastOnRowCol(midKv);
    checkSeekingConsistency(encodedSeekers, seekBefore, lastMidKv);
  }
  LOG.info("Done");
}
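checkSeekingConsistency is not shown on this page. As a hedged sketch of the idea, each per-encoding seeker is positioned with the same key and the cells they surface are compared. The helper name is invented, and the seek method signature seekToKeyInBlock(Cell, boolean) is reproduced from memory rather than from this snippet.

// Hypothetical reconstruction of the consistency check used above: position every
// per-encoding seeker with the same key and verify they all surface the same cell.
private static void assertSeekersAgree(List<DataBlockEncoder.EncodedSeeker> seekers, boolean seekBefore, Cell key) {
  Cell expected = null;
  for (DataBlockEncoder.EncodedSeeker seeker : seekers) {
    seeker.seekToKeyInBlock(key, seekBefore);    // assumed EncodedSeeker API
    Cell actual = seeker.getCell();
    if (expected == null) {
      expected = actual;                         // first encoding sets the baseline
    } else {
      assertEquals(0, CellComparatorImpl.COMPARATOR.compare(expected, actual));
    }
  }
}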
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
Class TestDataBlockEncoders, method getEncodingContext.
private HFileBlockEncodingContext getEncodingContext(Configuration conf, Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTags)
      .withCompression(algo)
      .build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  }
}
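A decoding-side companion built the same way keeps reader and writer agreeing on MVCC, tags, and checksum flags. This is a hedged sketch: the helper name is invented, and it assumes a non-null encoder, which is how testSeekingOnSample uses it (encodings without an encoder are skipped).

// Hypothetical companion to getEncodingContext: builds the matching decoding context
// from an identical HFileContext so both sides agree on MVCC, tags, and checksums.
private HFileBlockDecodingContext getDecodingContext(Configuration conf, Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();   // assumed non-null here
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTags)
      .withCompression(algo)
      .build();
  return encoder.newDataBlockDecodingContext(conf, meta);
}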
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
Class TestReversibleScanners, method testReversibleStoreFileScanner.
@Test
public void testReversibleStoreFileScanner() throws IOException {
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Path hfilePath = new Path(new Path(TEST_UTIL.getDataTestDir("testReversibleStoreFileScanner"), "regionname"),
      "familyname");
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    // Write one store file per data block encoding.
    HFileContextBuilder hcBuilder = new HFileContextBuilder();
    hcBuilder.withBlockSize(2 * 1024);
    hcBuilder.withDataBlockEncoding(encoding);
    HFileContext hFileContext = hcBuilder.build();
    StoreFileWriter writer = new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs)
        .withOutputDir(hfilePath)
        .withFileContext(hFileContext)
        .build();
    writeStoreFile(writer);
    HStoreFile sf = new HStoreFile(fs, writer.getPath(), TEST_UTIL.getConfiguration(), cacheConf, BloomType.NONE,
        true);
    // Verify reverse seeking, first with no MVCC limit, then at every read point.
    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(Collections.singletonList(sf),
        false, true, false, false, Long.MAX_VALUE);
    StoreFileScanner scanner = scanners.get(0);
    seekTestOfReversibleKeyValueScanner(scanner);
    for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
      LOG.info("Setting read point to " + readPoint);
      scanners = StoreFileScanner.getScannersForStoreFiles(Collections.singletonList(sf), false, true, false,
          false, readPoint);
      seekTestOfReversibleKeyValueScannerWithMVCC(scanners, readPoint);
    }
  }
}
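writeStoreFile is not shown on this page. As a hedged sketch of what such a helper has to do: append cells in comparator order, then close the writer. The real helper in TestReversibleScanners takes only the writer; the sortedKvs parameter here is added for illustration.

// Hypothetical helper: append cells in comparator order, then close the writer so
// the HFile trailer and file info are written out.
private static void writeStoreFile(StoreFileWriter writer, List<KeyValue> sortedKvs) throws IOException {
  try {
    for (KeyValue kv : sortedKvs) {
      writer.append(kv);    // KeyValue implements Cell, so it can be appended directly
    }
  } finally {
    writer.close();
  }
}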