
Example 41 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by Apache.

From the class BulkLoadHFilesTool, method copyHFileHalf.

/**
 * Copy half of an HFile into a new HFile.
 */
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException {
    FileSystem fs = inFile.getFileSystem(conf);
    CacheConfig cacheConf = CacheConfig.DISABLED;
    HalfStoreFileReader halfReader = null;
    StoreFileWriter halfWriter = null;
    try {
        ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, inFile).build();
        HFileInfo hfile = new HFileInfo(context, conf);
        halfReader = new HalfStoreFileReader(context, hfile, cacheConf, reference, new AtomicInteger(0), conf);
        hfile.initMetaAndIndex(halfReader.getHFileReader());
        Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
        int blocksize = familyDescriptor.getBlocksize();
        Algorithm compression = familyDescriptor.getCompressionType();
        BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
        HFileContext hFileContext = new HFileContextBuilder()
            .withCompression(compression)
            .withChecksumType(StoreUtils.getChecksumType(conf))
            .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
            .withBlockSize(blocksize)
            .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
            .withIncludesTags(true)
            .build();
        halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
            .withFilePath(outFile)
            .withBloomType(bloomFilterType)
            .withFileContext(hFileContext)
            .build();
        HFileScanner scanner = halfReader.getScanner(false, false, false);
        scanner.seekTo();
        do {
            halfWriter.append(scanner.getCell());
        } while (scanner.next());
        for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
            if (shouldCopyHFileMetaKey(entry.getKey())) {
                halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
            }
        }
    } finally {
        if (halfReader != null) {
            try {
                halfReader.close(cacheConf.shouldEvictOnClose());
            } catch (IOException e) {
                LOG.warn("failed to close hfile reader for " + inFile, e);
            }
        }
        if (halfWriter != null) {
            halfWriter.close();
        }
    }
}
Also used: StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter), HalfStoreFileReader(org.apache.hadoop.hbase.io.HalfStoreFileReader), HFileScanner(org.apache.hadoop.hbase.io.hfile.HFileScanner), HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), InterruptedIOException(java.io.InterruptedIOException), IOException(java.io.IOException), Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm), HFileInfo(org.apache.hadoop.hbase.io.hfile.HFileInfo), HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext), BloomType(org.apache.hadoop.hbase.regionserver.BloomType), AtomicInteger(java.util.concurrent.atomic.AtomicInteger), FileSystem(org.apache.hadoop.fs.FileSystem), ReaderContext(org.apache.hadoop.hbase.io.hfile.ReaderContext), ReaderContextBuilder(org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder), CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig), Map(java.util.Map), SortedMap(java.util.SortedMap), HashMap(java.util.HashMap), TreeMap(java.util.TreeMap)
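
As a usage note (not part of the snippet above): copyHFileHalf is private and is only called internally when a staged HFile straddles a region boundary. In recent HBase versions the public entry point is the BulkLoadHFiles interface; a minimal sketch, with a hypothetical table name and staging directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Staging directory laid out as <dir>/<family>/<hfile>, e.g. produced by HFileOutputFormat2.
        // The path and table name are placeholders.
        Path stagingDir = new Path("/tmp/bulkload-staging");
        BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
        // If a staged HFile crosses a region boundary, the tool splits it into two half
        // files internally (copyHFileHalf above) before loading each half into its region.
        loader.bulkLoad(TableName.valueOf("my_table"), stagingDir);
    }
}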

Example 42 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by Apache.

From the class CompressionTest, method doSmokeTest.

public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HFileContext context = new HFileContextBuilder().withCompression(HFileWriterImpl.compressionByName(codec)).build();
    HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withPath(fs, path).withFileContext(context).create();
    // Write any-old Cell...
    final byte[] rowKey = Bytes.toBytes("compressiontestkey");
    Cell c = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
        .setRow(rowKey)
        .setFamily(HConstants.EMPTY_BYTE_ARRAY)
        .setQualifier(HConstants.EMPTY_BYTE_ARRAY)
        .setTimestamp(HConstants.LATEST_TIMESTAMP)
        .setType(KeyValue.Type.Maximum.getCode())
        .setValue(Bytes.toBytes("compressiontestval"))
        .build();
    writer.append(c);
    writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
    writer.close();
    Cell cc = null;
    HFile.Reader reader = HFile.createReader(fs, path, CacheConfig.DISABLED, true, conf);
    try {
        HFileScanner scanner = reader.getScanner(conf, false, true);
        // position to the start of file
        scanner.seekTo();
        // Read back the first cell and compare it against what was written.
        cc = scanner.getCell();
        if (CellComparator.getInstance().compareRows(c, cc) != 0) {
            throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
        }
    } finally {
        reader.close();
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration), HFileScanner(org.apache.hadoop.hbase.io.hfile.HFileScanner), HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), HFile(org.apache.hadoop.hbase.io.hfile.HFile), Cell(org.apache.hadoop.hbase.Cell), DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException), IOException(java.io.IOException), HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext)
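
A minimal sketch of calling this smoke test directly; CompressionTest lives in org.apache.hadoop.hbase.util, the output path and codec name below are placeholders, and non-default codecs need their native libraries available:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.CompressionTest;

public class CompressionSmokeSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        // Writes a one-cell HFile with the given codec and reads it back to verify the codec works.
        CompressionTest.doSmokeTest(fs, new Path("/tmp/compression-smoke.hfile"), "gz");
    }
}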

Example 43 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by Apache.

From the class HFileOutputFormat2, method createRecordWriter.

static <V extends Cell> RecordWriter<ImmutableBytesWritable, V> createRecordWriter(final TaskAttemptContext context, final OutputCommitter committer) throws IOException {
    // Get the path of the temporary output file
    final Path outputDir = ((FileOutputCommitter) committer).getWorkPath();
    final Configuration conf = context.getConfiguration();
    final boolean writeMultipleTables = conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
    final String writeTableNames = conf.get(OUTPUT_TABLE_NAME_CONF_KEY);
    if (writeTableNames == null || writeTableNames.isEmpty()) {
        throw new IllegalArgumentException("" + OUTPUT_TABLE_NAME_CONF_KEY + " cannot be empty");
    }
    final FileSystem fs = outputDir.getFileSystem(conf);
    // These configs are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config. Add it to hbase-*.xml if using something other than the default compression.
    final String defaultCompressionStr = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr);
    String compressionStr = conf.get(COMPRESSION_OVERRIDE_CONF_KEY);
    final Algorithm overriddenCompression = compressionStr != null ? Compression.getCompressionAlgorithmByName(compressionStr) : null;
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false);
    final Set<String> allTableNames = Arrays.stream(writeTableNames.split(Bytes.toString(tableSeparator))).collect(Collectors.toSet());
    // create a map from column family to the compression algorithm
    final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
    final Map<byte[], String> bloomParamMap = createFamilyBloomParamMap(conf);
    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);
    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
    final Map<byte[], DataBlockEncoding> datablockEncodingMap = createFamilyDataBlockEncodingMap(conf);
    final DataBlockEncoding overriddenEncoding = dataBlockEncodingStr != null ? DataBlockEncoding.valueOf(dataBlockEncodingStr) : null;
    return new RecordWriter<ImmutableBytesWritable, V>() {

        // Map of families to writers and how much has been output on the writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<>(Bytes.BYTES_COMPARATOR);

        private final Map<byte[], byte[]> previousRows = new TreeMap<>(Bytes.BYTES_COMPARATOR);

        private final long now = EnvironmentEdgeManager.currentTime();

        private byte[] tableNameBytes = writeMultipleTables ? null : Bytes.toBytes(writeTableNames);

        @Override
        public void write(ImmutableBytesWritable row, V cell) throws IOException {
            Cell kv = cell;
            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters(null);
                return;
            }
            byte[] rowKey = CellUtil.cloneRow(kv);
            int length = (PrivateCellUtil.estimatedSerializedSizeOf(kv)) - Bytes.SIZEOF_INT;
            byte[] family = CellUtil.cloneFamily(kv);
            if (writeMultipleTables) {
                tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get());
                tableNameBytes = TableName.valueOf(tableNameBytes).getNameWithNamespaceInclAsString().getBytes(Charset.defaultCharset());
                if (!allTableNames.contains(Bytes.toString(tableNameBytes))) {
                    throw new IllegalArgumentException("TableName " + Bytes.toString(tableNameBytes) + " not expected");
                }
            }
            byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableNameBytes, family);
            WriterLength wl = this.writers.get(tableAndFamily);
            // If this is a new column family, create its output directory and set the storage policy
            if (wl == null) {
                Path writerPath = null;
                if (writeMultipleTables) {
                    Path tableRelPath = getTableRelativePath(tableNameBytes);
                    writerPath = new Path(outputDir, new Path(tableRelPath, Bytes.toString(family)));
                } else {
                    writerPath = new Path(outputDir, Bytes.toString(family));
                }
                fs.mkdirs(writerPath);
                configureStoragePolicy(conf, fs, tableAndFamily, writerPath);
            }
            // Roll the writer once it exceeds maxsize, but only at a row boundary
            if (wl != null && wl.written + length >= maxsize && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) {
                rollWriters(wl);
            }
            // create a new HFile writer, if necessary
            if (wl == null || wl.writer == null) {
                InetSocketAddress[] favoredNodes = null;
                if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
                    HRegionLocation loc = null;
                    String tableName = Bytes.toString(tableNameBytes);
                    if (tableName != null) {
                        try (Connection connection = ConnectionFactory.createConnection(createRemoteClusterConf(conf));
                            RegionLocator locator = connection.getRegionLocator(TableName.valueOf(tableName))) {
                            loc = locator.getRegionLocation(rowKey);
                        } catch (Throwable e) {
                            LOG.warn("Something wrong locating rowkey {} in {}", Bytes.toString(rowKey), tableName, e);
                            loc = null;
                        }
                    }
                    if (null == loc) {
                        LOG.trace("Failed get of location, use default writer {}", Bytes.toString(rowKey));
                    } else {
                        LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey));
                        InetSocketAddress initialIsa = new InetSocketAddress(loc.getHostname(), loc.getPort());
                        if (initialIsa.isUnresolved()) {
                            LOG.trace("Failed resolve address {}, use default writer", loc.getHostnamePort());
                        } else {
                            LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString());
                            favoredNodes = new InetSocketAddress[] { initialIsa };
                        }
                    }
                }
                wl = getNewWriter(tableNameBytes, family, conf, favoredNodes);
            }
            // we now have the proper HFile writer. full steam ahead
            PrivateCellUtil.updateLatestStamp(cell, this.now);
            wl.writer.append(kv);
            wl.written += length;
            // Copy the row so we can tell when the row transitions.
            this.previousRows.put(family, rowKey);
        }

        private Path getTableRelativePath(byte[] tableNameBytes) {
            String tableName = Bytes.toString(tableNameBytes);
            String[] tableNameParts = tableName.split(":");
            Path tableRelPath = new Path(tableNameParts[0]);
            if (tableNameParts.length > 1) {
                tableRelPath = new Path(tableRelPath, tableNameParts[1]);
            }
            return tableRelPath;
        }

        private void rollWriters(WriterLength writerLength) throws IOException {
            if (writerLength != null) {
                closeWriter(writerLength);
            } else {
                for (WriterLength wl : this.writers.values()) {
                    closeWriter(wl);
                }
            }
        }

        private void closeWriter(WriterLength wl) throws IOException {
            if (wl.writer != null) {
                LOG.info("Writer=" + wl.writer.getPath() + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                close(wl.writer);
                wl.writer = null;
            }
            wl.written = 0;
        }

        private Configuration createRemoteClusterConf(Configuration conf) {
            final Configuration newConf = new Configuration(conf);
            final String quorum = conf.get(REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY);
            final String clientPort = conf.get(REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY);
            final String parent = conf.get(REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY);
            if (quorum != null && clientPort != null && parent != null) {
                newConf.set(HConstants.ZOOKEEPER_QUORUM, quorum);
                newConf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.parseInt(clientPort));
                newConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parent);
            }
            for (Entry<String, String> entry : conf) {
                String key = entry.getKey();
                if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) || REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) || REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) {
                    // Handled them above
                    continue;
                }
                if (entry.getKey().startsWith(REMOTE_CLUSTER_CONF_PREFIX)) {
                    String originalKey = entry.getKey().substring(REMOTE_CLUSTER_CONF_PREFIX.length());
                    if (!originalKey.isEmpty()) {
                        newConf.set(originalKey, entry.getValue());
                    }
                }
            }
            return newConf;
        }

        /*
         * Create a new StoreFileWriter.
         * @return A WriterLength, containing a new StoreFileWriter.
         */
        @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", justification = "Not important")
        private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf, InetSocketAddress[] favoredNodes) throws IOException {
            byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family);
            Path familydir = new Path(outputDir, Bytes.toString(family));
            if (writeMultipleTables) {
                familydir = new Path(outputDir, new Path(getTableRelativePath(tableName), Bytes.toString(family)));
            }
            WriterLength wl = new WriterLength();
            Algorithm compression = overriddenCompression;
            compression = compression == null ? compressionMap.get(tableAndFamily) : compression;
            compression = compression == null ? defaultCompression : compression;
            BloomType bloomType = bloomTypeMap.get(tableAndFamily);
            bloomType = bloomType == null ? BloomType.NONE : bloomType;
            String bloomParam = bloomParamMap.get(tableAndFamily);
            if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) {
                conf.set(BloomFilterUtil.PREFIX_LENGTH_KEY, bloomParam);
            }
            Integer blockSize = blockSizeMap.get(tableAndFamily);
            blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;
            DataBlockEncoding encoding = overriddenEncoding;
            encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding;
            encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
            HFileContextBuilder contextBuilder = new HFileContextBuilder()
                .withCompression(compression)
                .withDataBlockEncoding(encoding)
                .withChecksumType(StoreUtils.getChecksumType(conf))
                .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
                .withBlockSize(blockSize)
                .withColumnFamily(family)
                .withTableName(tableName);
            if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
                contextBuilder.withIncludesTags(true);
            }
            HFileContext hFileContext = contextBuilder.build();
            if (null == favoredNodes) {
                wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs)
                    .withOutputDir(familydir)
                    .withBloomType(bloomType)
                    .withFileContext(hFileContext)
                    .build();
            } else {
                wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs))
                    .withOutputDir(familydir)
                    .withBloomType(bloomType)
                    .withFileContext(hFileContext)
                    .withFavoredNodes(favoredNodes)
                    .build();
            }
            this.writers.put(tableAndFamily, wl);
            return wl;
        }

        private void close(final StoreFileWriter w) throws IOException {
            if (w != null) {
                w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
                w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        @Override
        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}
Also used: DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding), StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter), Configuration(org.apache.hadoop.conf.Configuration), InetSocketAddress(java.net.InetSocketAddress), ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder), HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), RecordWriter(org.apache.hadoop.mapreduce.RecordWriter), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), FileSystem(org.apache.hadoop.fs.FileSystem), HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem), Cell(org.apache.hadoop.hbase.Cell), MapReduceExtendedCell(org.apache.hadoop.hbase.util.MapReduceExtendedCell), Path(org.apache.hadoop.fs.Path), RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable), FileOutputCommitter(org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter), Connection(org.apache.hadoop.hbase.client.Connection), TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext), Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm), HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext), BloomType(org.apache.hadoop.hbase.regionserver.BloomType), Map(java.util.Map), TreeMap(java.util.TreeMap)
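
For context, a minimal sketch of the MapReduce job setup that typically drives this record writer. The mapper class, table name, and paths below are hypothetical; HFileOutputFormat2.configureIncrementalLoad sets the output format, installs total-order partitioning over the table's region boundaries, and serializes the per-family compression, bloom, block-size, and encoding maps that createRecordWriter reads back above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadJobSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("my_table"); // placeholder table
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tableName);
             RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
            Job job = Job.getInstance(conf, "hfile-bulkload");
            job.setJarByClass(BulkLoadJobSketch.class);
            // MyCellMapper is a hypothetical, application-specific mapper that emits
            // ImmutableBytesWritable row keys and Put/Cell values.
            job.setMapperClass(MyCellMapper.class);
            FileInputFormat.addInputPath(job, new Path("/input"));                 // placeholder
            FileOutputFormat.setOutputPath(job, new Path("/tmp/bulkload-staging")); // placeholder
            // Wires in HFileOutputFormat2, the reducer, the partitioner, and per-family settings.
            HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
    }
}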

Example 44 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by Apache.

From the class StoreFileTrackerBase, method createFileContext.

private HFileContext createFileContext(Compression.Algorithm compression, boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    ColumnFamilyDescriptor family = ctx.getFamily();
    HFileContext hFileContext = new HFileContextBuilder()
        .withIncludesMvcc(includeMVCCReadpoint)
        .withIncludesTags(includesTag)
        .withCompression(compression)
        .withCompressTags(family.isCompressTags())
        .withChecksumType(StoreUtils.getChecksumType(conf))
        .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
        .withBlockSize(family.getBlocksize())
        .withHBaseCheckSum(true)
        .withDataBlockEncoding(family.getDataBlockEncoding())
        .withEncryptionContext(encryptionContext)
        .withCreateTime(EnvironmentEdgeManager.currentTime())
        .withColumnFamily(family.getName())
        .withTableName(ctx.getTableName().getName())
        .withCellComparator(ctx.getComparator())
        .build();
    return hFileContext;
}
Also used: HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor), HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext)
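
A minimal, self-contained sketch of how a context built this way is typically consumed, handed to a StoreFileWriter.Builder as in the other examples; the file path and settings below are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

public class StoreFileWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        // Build a context with placeholder settings.
        HFileContext hFileContext = new HFileContextBuilder()
            .withCompression(Compression.Algorithm.NONE)
            .withBlockSize(HConstants.DEFAULT_BLOCKSIZE)
            .withIncludesMvcc(true)
            .withIncludesTags(false)
            .build();
        // Hand the context to a store file writer, as createFileContext's callers do.
        StoreFileWriter writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs)
            .withFilePath(new Path("/tmp/sketch.hfile")) // placeholder path
            .withBloomType(BloomType.NONE)
            .withFileContext(hFileContext)
            .build();
        writer.close();
    }
}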

Example 45 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by Apache.

From the class MobUtils, method createWriter.

/**
   * Creates a writer for the mob file in temp directory.
   * @param conf The current configuration.
   * @param fs The current file system.
   * @param family The descriptor of the current column family.
   * @param path The path for a temp directory.
   * @param maxKeyCount The maximum key count.
   * @param compression The compression algorithm.
   * @param cacheConfig The current cache config.
   * @param cryptoContext The encryption context.
   * @param checksumType The checksum type.
   * @param bytesPerChecksum The bytes per checksum.
   * @param blocksize The HFile block size.
   * @param bloomType The bloom filter type.
   * @param isCompaction If the writer is used in compaction.
   * @return The writer for the mob file.
   * @throws IOException
   */
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, HColumnDescriptor family, Path path, long maxKeyCount, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, ChecksumType checksumType, int bytesPerChecksum, int blocksize, BloomType bloomType, boolean isCompaction) throws IOException {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    final CacheConfig writerCacheConf;
    if (isCompaction) {
        writerCacheConf = new CacheConfig(cacheConfig);
        writerCacheConf.setCacheDataOnWrite(false);
    } else {
        writerCacheConf = cacheConfig;
    }
    HFileContext hFileContext = new HFileContextBuilder()
        .withCompression(compression)
        .withIncludesMvcc(true)
        .withIncludesTags(true)
        .withCompressTags(family.isCompressTags())
        .withChecksumType(checksumType)
        .withBytesPerCheckSum(bytesPerChecksum)
        .withBlockSize(blocksize)
        .withHBaseCheckSum(true)
        .withDataBlockEncoding(family.getDataBlockEncoding())
        .withEncryptionContext(cryptoContext)
        .withCreateTime(EnvironmentEdgeManager.currentTime())
        .build();
    StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs)
        .withFilePath(path)
        .withComparator(CellComparator.COMPARATOR)
        .withBloomType(bloomType)
        .withMaxKeyCount(maxKeyCount)
        .withFileContext(hFileContext)
        .build();
    return w;
}
Also used: StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter), HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig), HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext)
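
A minimal sketch of invoking the overload shown above (the older HColumnDescriptor-based signature; newer HBase versions expose different overloads). The family name, temp path, and max key count are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.util.ChecksumType;

public class MobWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        HColumnDescriptor family = new HColumnDescriptor("mobcf"); // hypothetical MOB-enabled family
        StoreFileWriter writer = MobUtils.createWriter(conf, fs, family,
            new Path("/tmp/mob-temp/mobfile"),        // temp path, placeholder
            1000L,                                    // max key count, placeholder
            Compression.Algorithm.NONE,
            new CacheConfig(conf),
            Encryption.Context.NONE,                  // unencrypted family
            ChecksumType.getDefaultChecksumType(),
            HFile.DEFAULT_BYTES_PER_CHECKSUM,
            HConstants.DEFAULT_BLOCKSIZE,
            BloomType.NONE,
            false);                                   // not a compaction writer
        // Cells would be appended here before closing.
        writer.close();
    }
}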

Aggregations

HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 89 usages
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 82 usages
Path (org.apache.hadoop.fs.Path): 52 usages
Test (org.junit.Test): 48 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 39 usages
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 27 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 26 usages
Cell (org.apache.hadoop.hbase.Cell): 17 usages
HFile (org.apache.hadoop.hbase.io.hfile.HFile): 16 usages
ByteBuffer (java.nio.ByteBuffer): 15 usages
Configuration (org.apache.hadoop.conf.Configuration): 14 usages
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner): 12 usages
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 12 usages
DataOutputStream (java.io.DataOutputStream): 6 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 6 usages
DataBlockEncoding (org.apache.hadoop.hbase.io.encoding.DataBlockEncoding): 6 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 5 usages
IOException (java.io.IOException): 5 usages
ArrayList (java.util.ArrayList): 5 usages