
Example 1 with HalfStoreFileReader

Use of org.apache.hadoop.hbase.io.HalfStoreFileReader in the Apache HBase project.

From the class StoreFileInfo, method open:

/**
   * Open a Reader for the StoreFile.
   * @param fs The current file system to use.
   * @param cacheConf The cache configuration and block cache reference.
   * @param canUseDropBehind Whether a drop-behind hint may be passed to the file system.
   * @return The StoreFileReader for the file.
   */
public StoreFileReader open(final FileSystem fs, final CacheConfig cacheConf, final boolean canUseDropBehind) throws IOException {
    FSDataInputStreamWrapper in;
    FileStatus status;
    final boolean doDropBehind = canUseDropBehind && cacheConf.shouldDropBehindCompaction();
    if (this.link != null) {
        // HFileLink: the store file actually lives under another table/region directory
        in = new FSDataInputStreamWrapper(fs, this.link, doDropBehind);
        status = this.link.getFileStatus(fs);
    } else if (this.reference != null) {
        // HFile Reference: half of a parent store file left behind by a region split
        Path referencePath = getReferredToFile(this.getPath());
        in = new FSDataInputStreamWrapper(fs, referencePath, doDropBehind);
        status = fs.getFileStatus(referencePath);
    } else {
        in = new FSDataInputStreamWrapper(fs, this.getPath(), doDropBehind);
        // initialPath is the field backing this.getPath()
        status = fs.getFileStatus(initialPath);
    }
    long length = status.getLen();
    hdfsBlocksDistribution = computeHDFSBlocksDistribution(fs);
    StoreFileReader reader = null;
    // Give coprocessors a chance to supply their own reader implementation
    if (this.coprocessorHost != null) {
        reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), in, length, cacheConf, reference);
    }
    if (reader == null) {
        if (this.reference != null) {
            // A Reference file resolves to a HalfStoreFileReader over the parent HFile
            reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference, conf);
        } else {
            reader = new StoreFileReader(fs, status.getPath(), in, length, cacheConf, conf);
        }
    }
    if (this.coprocessorHost != null) {
        reader = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(), in, length, cacheConf, reference, reader);
    }
    return reader;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) HalfStoreFileReader(org.apache.hadoop.hbase.io.HalfStoreFileReader)
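
For context, here is a minimal usage sketch of the open method above. This is a sketch only, assuming the HBase 1.x-era APIs shown in this example; the class name OpenStoreFileSketch and the command-line path are hypothetical. When the given path points at a Reference file, open hands back a HalfStoreFileReader; otherwise a plain StoreFileReader.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;

public class OpenStoreFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical store file path supplied by the caller; a Reference
        // file here would make open() return a HalfStoreFileReader.
        Path storeFilePath = new Path(args[0]);
        StoreFileInfo info = new StoreFileInfo(conf, fs, storeFilePath);
        StoreFileReader reader = info.open(fs, new CacheConfig(conf), /* canUseDropBehind */ false);
        try {
            System.out.println("entries: " + reader.getEntries());
        } finally {
            reader.close(true); // evict cached blocks on close
        }
    }
}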

Example 2 with HalfStoreFileReader

Use of org.apache.hadoop.hbase.io.HalfStoreFileReader in the Apache HBase project.

From the class LoadIncrementalHFiles, method copyHFileHalf:

/**
   * Copy half of an HFile into a new HFile.
   */
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Reference reference, HColumnDescriptor familyDescriptor) throws IOException {
    FileSystem fs = inFile.getFileSystem(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HalfStoreFileReader halfReader = null;
    StoreFileWriter halfWriter = null;
    try {
        halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf);
        Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
        int blocksize = familyDescriptor.getBlocksize();
        Algorithm compression = familyDescriptor.getCompressionType();
        BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
        HFileContext hFileContext = new HFileContextBuilder()
            .withCompression(compression)
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(blocksize)
            .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
            .withIncludesTags(true)
            .build();
        halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
            .withFilePath(outFile)
            .withBloomType(bloomFilterType)
            .withFileContext(hFileContext)
            .build();
        // getScanner(cacheBlocks=false, pread=false, isCompaction=false)
        HFileScanner scanner = halfReader.getScanner(false, false, false);
        scanner.seekTo();
        // Copy every cell of the selected half into the new writer
        do {
            halfWriter.append(scanner.getCell());
        } while (scanner.next());
        // Carry over the file-info metadata keys that should survive the copy
        for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
            if (shouldCopyHFileMetaKey(entry.getKey())) {
                halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
            }
        }
    } finally {
        if (halfWriter != null) {
            halfWriter.close();
        }
        if (halfReader != null) {
            halfReader.close(cacheConf.shouldEvictOnClose());
        }
    }
}
Also used : StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) HalfStoreFileReader(org.apache.hadoop.hbase.io.HalfStoreFileReader) HFileScanner(org.apache.hadoop.hbase.io.hfile.HFileScanner) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) BloomType(org.apache.hadoop.hbase.regionserver.BloomType) FileSystem(org.apache.hadoop.fs.FileSystem) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap)
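
For context, a hedged sketch of how copyHFileHalf is typically driven: splitting one HFile into a bottom half and a top half around a split key, in the spirit of LoadIncrementalHFiles.splitStoreFile. The helper name splitStoreFileSketch and its output paths are illustrative, and it is assumed to sit in the same class so the private copyHFileHalf above is visible.

// Assumed to live in LoadIncrementalHFiles, alongside copyHFileHalf;
// org.apache.hadoop.hbase.io.Reference is already imported by that class.
private static void splitStoreFileSketch(Configuration conf, Path inFile,
        HColumnDescriptor familyDesc, byte[] splitKey,
        Path bottomOut, Path topOut) throws IOException {
    // The bottom half holds rows strictly below the split key ...
    Reference bottomRef = Reference.createBottomReference(splitKey);
    // ... and the top half holds the split key and everything above it.
    Reference topRef = Reference.createTopReference(splitKey);
    copyHFileHalf(conf, inFile, bottomOut, bottomRef, familyDesc);
    copyHFileHalf(conf, inFile, topOut, topRef, familyDesc);
}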

Aggregations

HalfStoreFileReader (org.apache.hadoop.hbase.io.HalfStoreFileReader): 2 uses
HashMap (java.util.HashMap): 1 use
Map (java.util.Map): 1 use
TreeMap (java.util.TreeMap): 1 use
FileStatus (org.apache.hadoop.fs.FileStatus): 1 use
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper): 1 use
Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm): 1 use
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 1 use
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 1 use
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 1 use
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner): 1 use
BloomType (org.apache.hadoop.hbase.regionserver.BloomType): 1 use
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 1 use