
Example 1 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From class DefaultMobStoreFlusher, method flushSnapshot:

/**
   * Flushes the snapshot of the MemStore.
   * If this store is not a mob store, the cells in the snapshot are flushed to store files of
   * HBase. If it is a mob store, the flusher flushes the MemStore into two places:
   * the store files of HBase and the mob files.
   * <ol>
   * <li>Cells that are not of PUT type or that carry the delete mark are flushed directly to
   * HBase.</li>
   * <li>If the size of a cell value is larger than the threshold, it is flushed to a mob file,
   * and another cell holding the path of this file is flushed to HBase.</li>
   * <li>If the size of a cell value is smaller than or equal to the threshold, it is flushed to
   * HBase directly.</li>
   * </ol>
   */
@Override
public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId, MonitoredTask status, ThroughputController throughputController) throws IOException {
    ArrayList<Path> result = new ArrayList<>();
    long cellsCount = snapshot.getCellsCount();
    // don't flush if there are no entries
    if (cellsCount == 0)
        return result;
    // Use a store scanner to find which rows to flush.
    long smallestReadPoint = store.getSmallestReadPoint();
    InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
    if (scanner == null) {
        // NULL scanner returned from coprocessor hooks means skip normal processing
        return result;
    }
    StoreFileWriter writer;
    try {
        // We can fail in the block below before we complete adding this flush to the list of
        // store files. Add cleanup of anything put on the filesystem if we fail.
        synchronized (flushLock) {
            status.setStatus("Flushing " + store + ": creating writer");
            // Write the map out to the disk
            writer = store.createWriterInTmp(cellsCount, store.getFamily().getCompressionType(), false, true, true, false, snapshot.getTimeRangeTracker());
            IOException e = null;
            try {
                // It's a mob store, so flush the cells the mob way. This is where flushing
                // differs between a normal store and a mob store.
                performMobFlush(snapshot, cacheFlushId, scanner, writer, status, throughputController);
            } catch (IOException ioe) {
                e = ioe;
                // throw the exception out
                throw ioe;
            } finally {
                if (e != null) {
                    writer.close();
                } else {
                    finalizeWriter(writer, cacheFlushId, status);
                }
            }
        }
    } finally {
        scanner.close();
    }
    LOG.info("Mob store is flushed, sequenceid=" + cacheFlushId + ", memsize=" + StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getDataSize(), "", 1) + ", hasBloomFilter=" + writer.hasGeneralBloom() + ", into tmp file " + writer.getPath());
    result.add(writer.getPath());
    return result;
}
Also used: Path (org.apache.hadoop.fs.Path), StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException)
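The javadoc's three routing rules reduce to one per-cell decision. Below is a minimal sketch of that decision, assuming only the imports already listed above; the method and its parameters (mobSizeThreshold, mobFileName, the two writers, refCellTags) are illustrative names, not actual DefaultMobStoreFlusher fields.

// Hypothetical sketch of per-cell routing during a mob flush.
private void routeCell(Cell c, long mobSizeThreshold, byte[] mobFileName,
        StoreFileWriter mobFileWriter, StoreFileWriter storeWriter,
        byte[] refCellTags) throws IOException {
    if (c.getTypeByte() != KeyValue.Type.Put.getCode()) {
        // Rule 1: non-PUT cells (deletes etc.) go directly to the HBase store file.
        storeWriter.append(c);
    } else if (c.getValueLength() <= mobSizeThreshold) {
        // Rule 3: small values stay in the store file.
        storeWriter.append(c);
    } else {
        // Rule 2: the large value goes to the mob file, and a reference cell
        // whose value names the mob file goes to the store file.
        mobFileWriter.append(c);
        storeWriter.append(MobUtils.createMobRefCell(c, mobFileName, refCellTags));
    }
}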

Example 2 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From class TestMobFileCache, method createMobStoreFile:

/**
   * Creates a mob store file and commits it to the mob directory.
   */
private Path createMobStoreFile(HColumnDescriptor hcd) throws IOException {
    // Setting up a Store
    TableName tn = TableName.valueOf(TABLE);
    HTableDescriptor htd = new HTableDescriptor(tn);
    htd.addFamily(hcd);
    HMobStore mobStore = (HMobStore) region.getStore(hcd.getName());
    KeyValue key1 = new KeyValue(ROW, hcd.getName(), QF1, 1, VALUE);
    KeyValue key2 = new KeyValue(ROW, hcd.getName(), QF2, 1, VALUE);
    KeyValue key3 = new KeyValue(ROW2, hcd.getName(), QF3, 1, VALUE2);
    KeyValue[] keys = new KeyValue[] { key1, key2, key3 };
    int maxKeyCount = keys.length;
    HRegionInfo regionInfo = new HRegionInfo(tn);
    StoreFileWriter mobWriter = mobStore.createWriterInTmp(currentDate, maxKeyCount, hcd.getCompactionCompression(), regionInfo.getStartKey(), false);
    Path mobFilePath = mobWriter.getPath();
    String fileName = mobFilePath.getName();
    mobWriter.append(key1);
    mobWriter.append(key2);
    mobWriter.append(key3);
    mobWriter.close();
    String targetPathName = MobUtils.formatDate(currentDate);
    Path targetPath = new Path(mobStore.getPath(), targetPathName);
    mobStore.commitFile(mobFilePath, targetPath);
    return new Path(targetPath, fileName);
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Path (org.apache.hadoop.fs.Path), TableName (org.apache.hadoop.hbase.TableName), StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter), KeyValue (org.apache.hadoop.hbase.KeyValue), HMobStore (org.apache.hadoop.hbase.regionserver.HMobStore), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
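The commit at the end moves the temporary file into a date-partitioned directory under the store's mob path. A minimal sketch of deriving that final location, using only calls that already appear above (MobUtils.formatDate, HMobStore.getPath); the helper name is hypothetical:

// Sketch: where a committed mob file ends up, given the commit logic above.
private Path expectedMobFilePath(HMobStore mobStore, Date date, String fileName) {
    // The partition name is the formatted date, as in createMobStoreFile above.
    String datePartition = MobUtils.formatDate(date);
    return new Path(new Path(mobStore.getPath(), datePartition), fileName);
}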

Example 3 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From class LoadIncrementalHFiles, method copyHFileHalf:

/**
   * Copy half of an HFile into a new HFile.
   */
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile, Reference reference, HColumnDescriptor familyDescriptor) throws IOException {
    FileSystem fs = inFile.getFileSystem(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HalfStoreFileReader halfReader = null;
    StoreFileWriter halfWriter = null;
    try {
        halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf);
        Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
        int blocksize = familyDescriptor.getBlocksize();
        Algorithm compression = familyDescriptor.getCompressionType();
        BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
        HFileContext hFileContext = new HFileContextBuilder()
            .withCompression(compression)
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(blocksize)
            .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
            .withIncludesTags(true)
            .build();
        halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
            .withFilePath(outFile)
            .withBloomType(bloomFilterType)
            .withFileContext(hFileContext)
            .build();
        HFileScanner scanner = halfReader.getScanner(false, false, false);
        scanner.seekTo();
        do {
            halfWriter.append(scanner.getCell());
        } while (scanner.next());
        for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
            if (shouldCopyHFileMetaKey(entry.getKey())) {
                halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
            }
        }
    } finally {
        if (halfWriter != null) {
            halfWriter.close();
        }
        if (halfReader != null) {
            halfReader.close(cacheConf.shouldEvictOnClose());
        }
    }
}
Also used: StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter), HalfStoreFileReader (org.apache.hadoop.hbase.io.HalfStoreFileReader), HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner), HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm), HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext), BloomType (org.apache.hadoop.hbase.regionserver.BloomType), FileSystem (org.apache.hadoop.fs.FileSystem), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Map (java.util.Map), HashMap (java.util.HashMap), TreeMap (java.util.TreeMap)
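Elsewhere in LoadIncrementalHFiles this method is called once per half when an incoming HFile crosses a region boundary. A hedged sketch of that call pattern; the driver method and output paths are hypothetical, while Reference.createBottomReference/createTopReference are the factory methods of org.apache.hadoop.hbase.io.Reference:

// Hypothetical driver: split one HFile into its two halves around splitKey.
private static void splitHFileSketch(Configuration conf, Path inFile, byte[] splitKey,
        Path bottomOut, Path topOut, HColumnDescriptor familyDescriptor) throws IOException {
    // Bottom half holds keys below splitKey, top half keys at or above it.
    copyHFileHalf(conf, inFile, bottomOut,
        Reference.createBottomReference(splitKey), familyDescriptor);
    copyHFileHalf(conf, inFile, topOut,
        Reference.createTopReference(splitKey), familyDescriptor);
}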

Example 4 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From class DefaultMobStoreCompactor, method newMobWriter:

private StoreFileWriter newMobWriter(FileDetails fd, boolean major) throws IOException {
    try {
        StoreFileWriter mobFileWriter = mobStore.createWriterInTmp(new Date(fd.latestPutTs), fd.maxKeyCount, major ? majorCompactionCompression : minorCompactionCompression, store.getRegionInfo().getStartKey(), true);
        LOG.debug("New MOB writer created={} store={}", mobFileWriter.getPath().getName(), getStoreInfo());
        // Track the new MOB file in the reference set for this compaction
        mobRefSet.get().put(store.getTableName(), mobFileWriter.getPath().getName());
        return mobFileWriter;
    } catch (IOException e) {
        // Bailing out
        throw new IOException(String.format("Failed to create mob writer, store=%s", getStoreInfo()), e);
    }
}
Also used: StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter), InterruptedIOException (java.io.InterruptedIOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), IOException (java.io.IOException), Date (java.util.Date)

Example 5 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From class DefaultMobStoreCompactor, method performCompaction:

/**
 * Performs compaction on a column family with the mob flag enabled. This works only when MOB
 * compaction is explicitly requested (by the user) or by the Master. There are two modes of
 * MOB compaction:<br>
 * <p>
 * <ul>
 * <li>1. Full mode - all MOB data for a region is compacted into a single MOB file.
 * <li>2. I/O optimized mode - for use cases with no or infrequent updates/deletes of MOB
 * data. The main idea behind I/O optimized compaction is to limit the maximum size of a MOB
 * file produced during compaction and to limit I/O write/read amplification.
 * </ul>
 * The basic algorithm of compaction is the following: <br>
 * 1. If the Put cell has a mob reference tag, the cell's value is the path of the mob file.
 * <ol>
 * <li>If the value size of the referenced cell is larger than the threshold, the cell is
 * regarded as a mob; copy the cell (with its mob tag) directly into the new store file.</li>
 * <li>Otherwise, retrieve the mob cell from the mob file and write a copy of the cell into
 * the new store file.</li>
 * </ol>
 * 2. If the Put cell doesn't have a reference tag:
 * <ol>
 * <li>If the value size of the cell is larger than the threshold, the cell is regarded as a
 * mob; write the cell to a mob file, and write the path of this mob file to the store
 * file.</li>
 * <li>Otherwise, write the cell directly into the store file.</li>
 * </ol>
 * @param fd File details
 * @param scanner Where to read from.
 * @param smallestReadPoint Smallest read point.
 * @param cleanSeqId When true, remove the seqId (formerly mvcc) value when it is <=
 *          smallestReadPoint
 * @param throughputController The compaction throughput controller.
 * @param major Whether this is a major compaction.
 * @param numofFilesToCompact the number of files to compact
 * @return Whether compaction ended; false if it was interrupted for any reason.
 */
@Override
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, boolean major, int numofFilesToCompact) throws IOException {
    long bytesWrittenProgressForLog = 0;
    long bytesWrittenProgressForShippedCall = 0;
    // Clear old mob references
    mobRefSet.get().clear();
    boolean isUserRequest = userRequest.get();
    boolean compactMOBs = major && isUserRequest;
    boolean discardMobMiss = conf.getBoolean(MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY, MobConstants.DEFAULT_MOB_DISCARD_MISS);
    if (discardMobMiss) {
        LOG.warn("{}=true. This is unsafe setting recommended only when first upgrading to a version" + " with the distributed mob compaction feature on a cluster that has experienced MOB data " + "corruption.", MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY);
    }
    long maxMobFileSize = conf.getLong(MobConstants.MOB_COMPACTION_MAX_FILE_SIZE_KEY, MobConstants.DEFAULT_MOB_COMPACTION_MAX_FILE_SIZE);
    boolean ioOptimizedMode = this.ioOptimizedMode && !disableIO.get();
    LOG.info("Compact MOB={} optimized configured={} optimized enabled={} maximum MOB file size={}" + " major={} store={}", compactMOBs, this.ioOptimizedMode, ioOptimizedMode, maxMobFileSize, major, getStoreInfo());
    // Since scanner.next() can return 'false' but still be delivering data,
    // we have to use a do/while loop.
    List<Cell> cells = new ArrayList<>();
    // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME
    long currentTime = EnvironmentEdgeManager.currentTime();
    long lastMillis = 0;
    if (LOG.isDebugEnabled()) {
        lastMillis = currentTime;
    }
    CloseChecker closeChecker = new CloseChecker(conf, currentTime);
    String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
    long now = 0;
    boolean hasMore;
    byte[] fileName = null;
    StoreFileWriter mobFileWriter = null;
    /*
     * mobCells is used only to decide whether to commit or abort the current MOB output file.
     */
    long mobCells = 0;
    long cellsCountCompactedToMob = 0, cellsCountCompactedFromMob = 0;
    long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
    boolean finished = false;
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
    throughputController.start(compactionName);
    KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
    long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize();
    Cell mobCell = null;
    try {
        mobFileWriter = newMobWriter(fd, major);
        fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
        do {
            hasMore = scanner.next(cells, scannerContext);
            currentTime = EnvironmentEdgeManager.currentTime();
            if (LOG.isDebugEnabled()) {
                now = currentTime;
            }
            if (closeChecker.isTimeLimit(store, currentTime)) {
                progress.cancel();
                return false;
            }
            for (Cell c : cells) {
                if (compactMOBs) {
                    if (MobUtils.isMobReferenceCell(c)) {
                        String fName = MobUtils.getMobFileName(c);
                        // Added to support migration
                        try {
                            mobCell = mobStore.resolve(c, true, false).getCell();
                        } catch (DoNotRetryIOException e) {
                            if (discardMobMiss && e.getCause() != null && e.getCause() instanceof FileNotFoundException) {
                                LOG.error("Missing MOB cell: file={} not found cell={}", fName, c);
                                continue;
                            } else {
                                throw e;
                            }
                        }
                        if (discardMobMiss && mobCell.getValueLength() == 0) {
                            LOG.error("Missing MOB cell value: file={} mob cell={} cell={}", fName, mobCell, c);
                            continue;
                        } else if (mobCell.getValueLength() == 0) {
                            String errMsg = String.format("Found 0 length MOB cell in a file=%s mob cell=%s " + " cell=%s", fName, mobCell, c);
                            throw new IOException(errMsg);
                        }
                        if (mobCell.getValueLength() > mobSizeThreshold) {
                            // put the mob data back to the MOB store file
                            PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId());
                            if (!ioOptimizedMode) {
                                mobFileWriter.append(mobCell);
                                mobCells++;
                                writer.append(MobUtils.createMobRefCell(mobCell, fileName, this.mobStore.getRefCellTags()));
                            } else {
                                // I/O optimized mode
                                // Check if MOB cell origin file size is
                                // greater than threshold
                                Long size = mobLengthMap.get().get(fName);
                                if (size == null) {
                                    // FATAL error (though we should never get here): abort compaction.
                                    // It means the meta section of the store file does not list a MOB
                                    // file that is referenced by at least one cell in this store file.
                                    String msg = String.format("Found an unexpected MOB file during compaction %s, aborting compaction %s", fName, getStoreInfo());
                                    throw new IOException(msg);
                                }
                                // size cannot be null after the check above
                                if (size < maxMobFileSize) {
                                    // The MOB cell's origin file is below the threshold,
                                    // so it gets compacted.
                                    mobFileWriter.append(mobCell);
                                    // Update number of mobCells in a current mob writer
                                    mobCells++;
                                    writer.append(MobUtils.createMobRefCell(mobCell, fileName, this.mobStore.getRefCellTags()));
                                    // Update total size of the output (we do not take into account
                                    // file compression yet)
                                    long len = mobFileWriter.getPos();
                                    if (len > maxMobFileSize) {
                                        LOG.debug("Closing output MOB File, length={} file={}, store={}", len, mobFileWriter.getPath().getName(), getStoreInfo());
                                        commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
                                        mobFileWriter = newMobWriter(fd, major);
                                        fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
                                        mobCells = 0;
                                    }
                                } else {
                                    // We leave the large MOB file as is (it is not compacted);
                                    // we just update the set of MOB file references
                                    // and append the reference cell directly to the store's writer.
                                    Optional<TableName> refTable = MobUtils.getTableName(c);
                                    if (refTable.isPresent()) {
                                        mobRefSet.get().put(refTable.get(), fName);
                                        writer.append(c);
                                    } else {
                                        throw new IOException(String.format("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. " + "store=%s cell=%s", getStoreInfo(), c));
                                    }
                                }
                            }
                        } else {
                            // The MOB value is at or below the threshold; append it directly to the store file.
                            PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId());
                            writer.append(mobCell);
                            cellsCountCompactedFromMob++;
                            cellsSizeCompactedFromMob += mobCell.getValueLength();
                        }
                    } else {
                        // Not a MOB reference cell
                        int size = c.getValueLength();
                        if (size > mobSizeThreshold) {
                            // This MOB cell comes from a regular store file,
                            // so we write it into the current mob output file.
                            mobFileWriter.append(c);
                            writer.append(MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()));
                            mobCells++;
                            cellsCountCompactedToMob++;
                            cellsSizeCompactedToMob += c.getValueLength();
                            if (ioOptimizedMode) {
                                // Update total size of the output (we do not take into account
                                // file compression yet)
                                long len = mobFileWriter.getPos();
                                if (len > maxMobFileSize) {
                                    commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
                                    mobFileWriter = newMobWriter(fd, major);
                                    fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
                                    mobCells = 0;
                                }
                            }
                        } else {
                            // Not a MOB cell, write it directly to a store file
                            writer.append(c);
                        }
                    }
                } else if (c.getTypeByte() != KeyValue.Type.Put.getCode()) {
                    // Not a major compaction or major with MOB disabled
                    // If the kv type is not put, directly write the cell
                    // to the store file.
                    writer.append(c);
                } else if (MobUtils.isMobReferenceCell(c)) {
                    // Not a major MOB compaction, Put MOB reference
                    if (MobUtils.hasValidMobRefCellValue(c)) {
                        // We do not check mobSizeThreshold during normal compaction,
                        // leaving it to a MOB compaction run
                        Optional<TableName> refTable = MobUtils.getTableName(c);
                        if (refTable.isPresent()) {
                            mobRefSet.get().put(refTable.get(), MobUtils.getMobFileName(c));
                            writer.append(c);
                        } else {
                            throw new IOException(String.format("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. " + "store=%s cell=%s", getStoreInfo(), c));
                        }
                    } else {
                        String errMsg = String.format("Corrupted MOB reference: %s", c.toString());
                        throw new IOException(errMsg);
                    }
                } else if (c.getValueLength() <= mobSizeThreshold) {
                    // If the value size of a cell is not larger than the threshold, directly write it to
                    // the store file.
                    writer.append(c);
                } else {
                    // If the value size of a cell is larger than the threshold, it's regarded as a mob,
                    // write this cell to a mob file, and write the path to the store file.
                    mobCells++;
                    // Append the original KeyValue to the mob file.
                    mobFileWriter.append(c);
                    Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
                    // write the cell whose value is the path of a mob file to the store file.
                    writer.append(reference);
                    cellsCountCompactedToMob++;
                    cellsSizeCompactedToMob += c.getValueLength();
                    if (ioOptimizedMode) {
                        long len = mobFileWriter.getPos();
                        if (len > maxMobFileSize) {
                            commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
                            mobFileWriter = newMobWriter(fd, major);
                            fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
                            mobCells = 0;
                        }
                    }
                }
                int len = c.getSerializedSize();
                ++progress.currentCompactedKVs;
                progress.totalCompactedSize += len;
                bytesWrittenProgressForShippedCall += len;
                if (LOG.isDebugEnabled()) {
                    bytesWrittenProgressForLog += len;
                }
                throughputController.control(compactionName, len);
                if (closeChecker.isSizeLimit(store, len)) {
                    progress.cancel();
                    return false;
                }
                if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
                    ((ShipperListener) writer).beforeShipped();
                    kvs.shipped();
                    bytesWrittenProgressForShippedCall = 0;
                }
            }
            // logging at DEBUG level
            if (LOG.isDebugEnabled()) {
                if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
                    String rate = String.format("%.2f", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0));
                    LOG.debug("Compaction progress: {} {}, rate={} KB/sec, throughputController is {}", compactionName, progress, rate, throughputController);
                    lastMillis = now;
                    bytesWrittenProgressForLog = 0;
                }
            }
            cells.clear();
        } while (hasMore);
        finished = true;
    } catch (InterruptedException e) {
        progress.cancel();
        throw new InterruptedIOException("Interrupted while control throughput of compacting " + compactionName);
    } catch (IOException t) {
        String msg = "Mob compaction failed for region: " + store.getRegionInfo().getEncodedName();
        throw new IOException(msg, t);
    } finally {
        // Clone the last cell in the finally block because the writer will append the last cell
        // when committing. If we don't clone here, then once the scanner is closed the memory of
        // the last cell will be released. (HBASE-22582)
        ((ShipperListener) writer).beforeShipped();
        throughputController.finish(compactionName);
        if (!finished && mobFileWriter != null) {
            // Remove all MOB references because compaction failed
            clearThreadLocals();
            // Abort writer
            LOG.debug("Aborting writer for {} because of a compaction failure, Store {}", mobFileWriter.getPath(), getStoreInfo());
            abortWriter(mobFileWriter);
        }
    }
    // Commit last MOB writer
    commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major);
    mobStore.updateCellsCountCompactedFromMob(cellsCountCompactedFromMob);
    mobStore.updateCellsCountCompactedToMob(cellsCountCompactedToMob);
    mobStore.updateCellsSizeCompactedFromMob(cellsSizeCompactedFromMob);
    mobStore.updateCellsSizeCompactedToMob(cellsSizeCompactedToMob);
    progress.complete();
    return true;
}
Also used: CloseChecker (org.apache.hadoop.hbase.regionserver.compactions.CloseChecker), StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter), InterruptedIOException (java.io.InterruptedIOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), ArrayList (java.util.ArrayList), KeyValueScanner (org.apache.hadoop.hbase.regionserver.KeyValueScanner), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), TableName (org.apache.hadoop.hbase.TableName), Cell (org.apache.hadoop.hbase.Cell), ScannerContext (org.apache.hadoop.hbase.regionserver.ScannerContext), ShipperListener (org.apache.hadoop.hbase.regionserver.ShipperListener)
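The I/O optimized branch above repeats one idiom in three places: once the current MOB writer grows past maxMobFileSize, commit it and start a new one. A hedged refactoring sketch of that idiom (hypothetical helper; the caller must still refresh its fileName and reset mobCells bookkeeping, as the loop above does):

// Hypothetical helper capturing the repeated rollover idiom from performCompaction.
private StoreFileWriter maybeRollMobWriter(StoreFileWriter current, FileDetails fd,
        long mobCells, boolean major, long maxMobFileSize) throws IOException {
    if (current.getPos() > maxMobFileSize) {
        // Commit (or abort, if empty) the full writer, then open a fresh one.
        commitOrAbortMobWriter(current, fd.maxSeqId, mobCells, major);
        return newMobWriter(fd, major);
    }
    return current;
}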

Aggregations

StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 30
Path (org.apache.hadoop.fs.Path): 23
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 14
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 13
IOException (java.io.IOException): 11
Cell (org.apache.hadoop.hbase.Cell): 11
InterruptedIOException (java.io.InterruptedIOException): 9
ArrayList (java.util.ArrayList): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 9
KeyValue (org.apache.hadoop.hbase.KeyValue): 9
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 6
Test (org.junit.Test): 6
Date (java.util.Date): 5
TableName (org.apache.hadoop.hbase.TableName): 5
ScannerContext (org.apache.hadoop.hbase.regionserver.ScannerContext): 5
Map (java.util.Map): 4
HashMap (java.util.HashMap): 3
TreeMap (java.util.TreeMap): 3
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3
Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm): 3