Use of org.apache.hadoop.hbase.io.HalfStoreFileReader in project hbase by apache.
From the class StoreFileInfo, method open:
/**
 * Open a Reader for the StoreFile.
 * @param fs The current file system to use.
 * @param cacheConf The cache configuration and block cache reference.
 * @param canUseDropBehind Whether dropping pages behind the read may be used (combined with the
 *   cache configuration's drop-behind-compaction setting).
 * @return The StoreFileReader for the file.
 */
public StoreFileReader open(final FileSystem fs, final CacheConfig cacheConf,
    final boolean canUseDropBehind) throws IOException {
  FSDataInputStreamWrapper in;
  FileStatus status;

  final boolean doDropBehind = canUseDropBehind && cacheConf.shouldDropBehindCompaction();
  if (this.link != null) {
    // HFileLink
    in = new FSDataInputStreamWrapper(fs, this.link, doDropBehind);
    status = this.link.getFileStatus(fs);
  } else if (this.reference != null) {
    // HFile Reference: read through the file the reference points at
    Path referencePath = getReferredToFile(this.getPath());
    in = new FSDataInputStreamWrapper(fs, referencePath, doDropBehind);
    status = fs.getFileStatus(referencePath);
  } else {
    in = new FSDataInputStreamWrapper(fs, this.getPath(), doDropBehind);
    status = fs.getFileStatus(initialPath);
  }
  long length = status.getLen();
  hdfsBlocksDistribution = computeHDFSBlocksDistribution(fs);

  // Give coprocessors a chance to supply their own reader first.
  StoreFileReader reader = null;
  if (this.coprocessorHost != null) {
    reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), in, length,
      cacheConf, reference);
  }
  if (reader == null) {
    if (this.reference != null) {
      // A Reference marks one half of a split parent's file: expose only that half.
      reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference, conf);
    } else {
      reader = new StoreFileReader(fs, status.getPath(), in, length, cacheConf, conf);
    }
  }
  if (this.coprocessorHost != null) {
    reader = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(), in, length,
      cacheConf, reference, reader);
  }
  return reader;
}
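For context, here is a minimal caller-side sketch of how this open method might be driven. It is not taken from the HBase source; the store file path is hypothetical, and the use of getEntries is only illustrative, assuming the StoreFileInfo(conf, fs, path) constructor and StoreFileReader.getEntries() available in this HBase version.

  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  // Hypothetical store file path; a real path would come from a region's column-family directory.
  Path storeFilePath = new Path("/hbase/data/default/t1/region/cf/hfile1");
  StoreFileInfo info = new StoreFileInfo(conf, fs, storeFilePath);
  // canUseDropBehind = false: do not ask the file system to drop cached pages behind the read.
  StoreFileReader reader = info.open(fs, cacheConf, false);
  try {
    long entries = reader.getEntries(); // e.g. inspect how many cells the file holds
  } finally {
    reader.close(cacheConf.shouldEvictOnClose());
  }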
Use of org.apache.hadoop.hbase.io.HalfStoreFileReader in project hbase by apache.
From the class LoadIncrementalHFiles, method copyHFileHalf:
/**
 * Copy half of an HFile into a new HFile.
 */
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
    Reference reference, HColumnDescriptor familyDescriptor) throws IOException {
  FileSystem fs = inFile.getFileSystem(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HalfStoreFileReader halfReader = null;
  StoreFileWriter halfWriter = null;
  try {
    // The half reader only exposes the cells on the side of the split named by the reference.
    halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf);
    Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();

    // Build the new file's context from the column family's settings.
    int blocksize = familyDescriptor.getBlocksize();
    Algorithm compression = familyDescriptor.getCompressionType();
    BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
    HFileContext hFileContext = new HFileContextBuilder()
        .withCompression(compression)
        .withChecksumType(HStore.getChecksumType(conf))
        .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
        .withBlockSize(blocksize)
        .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
        .withIncludesTags(true)
        .build();
    halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFilePath(outFile)
        .withBloomType(bloomFilterType)
        .withFileContext(hFileContext)
        .build();

    // Copy every cell of the selected half into the new file.
    HFileScanner scanner = halfReader.getScanner(false, false, false);
    scanner.seekTo();
    do {
      halfWriter.append(scanner.getCell());
    } while (scanner.next());

    // Carry over the file-info metadata entries that are safe to copy.
    for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
      if (shouldCopyHFileMetaKey(entry.getKey())) {
        halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
      }
    }
  } finally {
    if (halfWriter != null) {
      halfWriter.close();
    }
    if (halfReader != null) {
      halfReader.close(cacheConf.shouldEvictOnClose());
    }
  }
}
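In the bulk-load path, copyHFileHalf is invoked once per half when an incoming HFile straddles a region boundary. The following sketch mirrors that split logic; the method name splitStoreFile and the output paths are illustrative, while Reference.createBottomReference and Reference.createTopReference are the factory methods in org.apache.hadoop.hbase.io.Reference used to name the two halves.

  // Sketch: split one HFile at splitKey by writing a bottom-half and a top-half file.
  static void splitStoreFile(Configuration conf, Path inFile, HColumnDescriptor familyDescriptor,
      byte[] splitKey, Path bottomOut, Path topOut) throws IOException {
    // Cells before splitKey belong to the bottom half, cells at or after it to the top half.
    Reference bottomReference = Reference.createBottomReference(splitKey);
    Reference topReference = Reference.createTopReference(splitKey);
    copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDescriptor);
    copyHFileHalf(conf, inFile, topOut, topReference, familyDescriptor);
  }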