
Example 6 with StoreFileWriter

use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

the class PartitionedMobCompactor method compactMobFilesInBatch.

/**
   * Compacts a partition of selected small mob files and all the del files in a batch.
   * @param request The compaction request.
   * @param partition A compaction partition.
   * @param connection The connection to use.
   * @param table The current table.
   * @param filesToCompact The files to be compacted.
   * @param batch The number of mob files to be compacted in a batch.
   * @param bulkloadPathOfPartition The directory where the bulkload column of the current
   *   partition is saved.
   * @param bulkloadColumnPath The directory where the bulkload files of current partition
   *   are saved.
   * @param newFiles The paths of new mob files after compactions.
   * @throws IOException if IO failure is encountered
   */
private void compactMobFilesInBatch(PartitionedMobCompactionRequest request, CompactionPartition partition, Connection connection, Table table, List<StoreFile> filesToCompact, int batch, Path bulkloadPathOfPartition, Path bulkloadColumnPath, List<Path> newFiles) throws IOException {
    // open scanner to the selected mob files and del files.
    StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES);
    // the mob files to be compacted, not including the del files.
    List<StoreFile> mobFilesToCompact = filesToCompact.subList(0, batch);
    // Pair(maxSeqId, cellsCount)
    Pair<Long, Long> fileInfo = getFileInfo(mobFilesToCompact);
    // open writers for the mob files and new ref store files.
    StoreFileWriter writer = null;
    StoreFileWriter refFileWriter = null;
    Path filePath = null;
    long mobCells = 0;
    boolean cleanupTmpMobFile = false;
    boolean cleanupBulkloadDirOfPartition = false;
    boolean cleanupCommittedMobFile = false;
    boolean closeReaders = true;
    try {
        try {
            writer = MobUtils.createWriter(conf, fs, column, partition.getPartitionId().getLatestDate(), tempPath, Long.MAX_VALUE, column.getCompactionCompressionType(), partition.getPartitionId().getStartKey(), compactionCacheConfig, cryptoContext, true);
            cleanupTmpMobFile = true;
            filePath = writer.getPath();
            byte[] fileName = Bytes.toBytes(filePath.getName());
            // create a temp file and open a writer for it in the bulkloadPath
            refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, fileInfo.getSecond().longValue(), compactionCacheConfig, cryptoContext, true);
            cleanupBulkloadDirOfPartition = true;
            List<Cell> cells = new ArrayList<>();
            boolean hasMore;
            ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
            do {
                hasMore = scanner.next(cells, scannerContext);
                for (Cell cell : cells) {
                    // write the mob cell to the mob file.
                    writer.append(cell);
                    // write the new reference cell to the store file.
                    Cell reference = MobUtils.createMobRefCell(cell, fileName, this.refCellTags);
                    refFileWriter.append(reference);
                    mobCells++;
                }
                cells.clear();
            } while (hasMore);
        } finally {
            // close the scanner.
            scanner.close();
            if (cleanupTmpMobFile) {
                // append metadata to the mob file, and close the mob file writer.
                closeMobFileWriter(writer, fileInfo.getFirst(), mobCells);
            }
            if (cleanupBulkloadDirOfPartition) {
                // append metadata and bulkload info to the ref mob file, and close the writer.
                closeRefFileWriter(refFileWriter, fileInfo.getFirst(), request.selectionTime);
            }
        }
        if (mobCells > 0) {
            // commit mob file
            MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
            cleanupTmpMobFile = false;
            cleanupCommittedMobFile = true;
            // bulkload the ref file
            bulkloadRefFile(connection, table, bulkloadPathOfPartition, filePath.getName());
            cleanupCommittedMobFile = false;
            newFiles.add(new Path(mobFamilyDir, filePath.getName()));
        }
        // archive the old mob files, do not archive the del files.
        try {
            closeStoreFileReaders(mobFilesToCompact);
            closeReaders = false;
            MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), mobFilesToCompact);
        } catch (IOException e) {
            LOG.error("Failed to archive the files " + mobFilesToCompact, e);
        }
    } finally {
        if (closeReaders) {
            closeStoreFileReaders(mobFilesToCompact);
        }
        if (cleanupTmpMobFile) {
            deletePath(filePath);
        }
        if (cleanupBulkloadDirOfPartition) {
            // delete the bulkload files in bulkloadPath
            deletePath(bulkloadPathOfPartition);
        }
        if (cleanupCommittedMobFile) {
            deletePath(new Path(mobFamilyDir, filePath.getName()));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) ArrayList(java.util.ArrayList) IOException(java.io.IOException) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) StoreScanner(org.apache.hadoop.hbase.regionserver.StoreScanner) Cell(org.apache.hadoop.hbase.Cell) ScannerContext(org.apache.hadoop.hbase.regionserver.ScannerContext)
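The try/finally structure above leans entirely on the cleanup booleans: each flag flips at the exact point where responsibility for a resource changes hands, so the finally block deletes only what a failure actually left behind. Below is a minimal, self-contained sketch of the same pattern; writeTempFile, commitTempFile, bulkload, and deletePath are hypothetical stand-ins for the MobUtils and compactor calls, not HBase APIs.

import org.apache.hadoop.fs.Path;

final class FlagGuardedCleanupSketch {
    Path writeTempFile() { return new Path("/tmp/mobfile"); }           // hypothetical stand-in
    Path commitTempFile(Path tmp) { return new Path("/data/mobfile"); } // hypothetical stand-in
    void bulkload(Path committed) { }                                   // hypothetical stand-in
    void deletePath(Path p) { }                                         // hypothetical stand-in

    void run() {
        Path tmp = null;
        Path committed = null;
        boolean cleanupTmpFile = false;
        boolean cleanupCommittedFile = false;
        try {
            tmp = writeTempFile();
            cleanupTmpFile = true;          // a failure from here on must delete the temp file
            committed = commitTempFile(tmp);
            cleanupTmpFile = false;         // temp file is gone; protect the committed copy instead
            cleanupCommittedFile = true;
            bulkload(committed);
            cleanupCommittedFile = false;   // fully done; nothing left to undo
        } finally {
            if (cleanupTmpFile) {
                deletePath(tmp);
            }
            if (cleanupCommittedFile) {
                deletePath(committed);
            }
        }
    }
}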

Example 7 with StoreFileWriter

use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

the class DefaultMobStoreCompactor method performCompaction.

/**
   * Performs compaction on a column family with the mob flag enabled.
   * This is for when the mob threshold size has changed or if the mob
   * column family mode has been toggled via an alter table statement.
   * Compacts the files according to the following rules.
   * 1. If the Put cell has a mob reference tag, the cell's value is the path of the mob file.
   * <ol>
   * <li>
   * If the value size of a cell is larger than the threshold, this cell is regarded as a mob;
   * directly copy the cell (with its mob tag) into the new store file.
   * </li>
   * <li>
   * Otherwise, retrieve the mob cell from the mob file, and write a copy of the cell into
   * the new store file.
   * </li>
   * </ol>
   * 2. If the Put cell doesn't have a reference tag.
   * <ol>
   * <li>
   * If the value size of a cell is larger than the threshold, this cell is regarded as a mob,
   * write this cell to a mob file, and write the path of this mob file to the store file.
   * </li>
   * <li>
   * Otherwise, directly write this cell into the store file.
   * </li>
   * </ol>
   * 3. Decide how to write a Delete cell.
   * <ol>
   * <li>
   * If a Delete cell does not have a mob reference tag, meaning this delete marker has not
   * been written to the mob del file yet, write the cell to the mob del file, and write the
   * cell with a ref tag to a store file.
   * </li>
   * <li>
   * Otherwise, directly write it to a store file.
   * </li>
   * </ol>
   * After the major compaction on the normal hfiles, we are guaranteed to have purged all
   * deleted and old-version mob refs, and the delete markers have been written to a del file
   * with the suffix _del. Because of this, it is safe to use the del file in the mob compaction.
   * The mob compaction doesn't touch the normal hfiles; it operates directly on the
   * mob files. When the small mob files are merged into bigger ones, the del file is added to
   * the scanner to filter out the deleted cells.
   * @param fd File details
   * @param scanner Where to read from.
   * @param writer Where to write to.
   * @param smallestReadPoint Smallest read point.
   * @param cleanSeqId When true, remove the seqId (formerly mvcc) value if it is <= smallestReadPoint
   * @param throughputController The compaction throughput controller.
   * @param major Is a major compaction.
   * @param numofFilesToCompact The number of files to compact.
   * @return Whether compaction ended; false if it was interrupted for any reason.
   */
@Override
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, boolean major, int numofFilesToCompact) throws IOException {
    long bytesWrittenProgressForCloseCheck = 0;
    long bytesWrittenProgressForLog = 0;
    long bytesWrittenProgressForShippedCall = 0;
    // Since scanner.next() can return 'false' but still be delivering data,
    // we have to use a do/while loop.
    List<Cell> cells = new ArrayList<>();
    // Bytes to write before checking whether the store is closing ("hbase.hstore.close.check.interval")
    int closeCheckSizeLimit = HStore.getCloseCheckInterval();
    long lastMillis = 0;
    if (LOG.isDebugEnabled()) {
        lastMillis = EnvironmentEdgeManager.currentTime();
    }
    String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
    long now = 0;
    boolean hasMore;
    Path path = MobUtils.getMobFamilyPath(conf, store.getTableName(), store.getColumnFamilyName());
    byte[] fileName = null;
    StoreFileWriter mobFileWriter = null, delFileWriter = null;
    long mobCells = 0, deleteMarkersCount = 0;
    long cellsCountCompactedToMob = 0, cellsCountCompactedFromMob = 0;
    long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
    boolean finished = false;
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
    throughputController.start(compactionName);
    KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
    long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getFamily().getBlocksize();
    try {
        try {
            // Try to create the mob file writer; if that fails, fall back to writing
            // MOB cells directly to the store file (see the catch below).
            mobFileWriter = mobStore.createWriterInTmp(new Date(fd.latestPutTs), fd.maxKeyCount, compactionCompression, store.getRegionInfo().getStartKey(), true);
            fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
        } catch (IOException e) {
            LOG.warn("Failed to create mob writer, " + "we will continue the compaction by writing MOB cells directly in store files", e);
        }
        if (major) {
            try {
                delFileWriter = mobStore.createDelFileWriterInTmp(new Date(fd.latestPutTs), fd.maxKeyCount, compactionCompression, store.getRegionInfo().getStartKey());
            } catch (IOException e) {
                LOG.warn("Failed to create del writer, " + "we will continue the compaction by writing delete markers directly in store files", e);
            }
        }
        do {
            hasMore = scanner.next(cells, scannerContext);
            if (LOG.isDebugEnabled()) {
                now = EnvironmentEdgeManager.currentTime();
            }
            for (Cell c : cells) {
                if (major && CellUtil.isDelete(c)) {
                    if (MobUtils.isMobReferenceCell(c) || delFileWriter == null) {
                        // Directly write it to a store file
                        writer.append(c);
                    } else {
                        // Add a ref tag to this cell and write it to a store file.
                        writer.append(MobUtils.createMobRefDeleteMarker(c));
                        // Write the cell to a del file
                        delFileWriter.append(c);
                        deleteMarkersCount++;
                    }
                } else if (mobFileWriter == null || c.getTypeByte() != KeyValue.Type.Put.getCode()) {
                    // If the mob file writer is null or the kv type is not put, directly write the cell
                    // to the store file.
                    writer.append(c);
                } else if (MobUtils.isMobReferenceCell(c)) {
                    if (MobUtils.hasValidMobRefCellValue(c)) {
                        int size = MobUtils.getMobValueLength(c);
                        if (size > mobSizeThreshold) {
                            // If the value size is larger than the threshold, it's regarded as a mob. Since
                            // its value is already in the mob file, directly write this cell to the store file
                            writer.append(c);
                        } else {
                            // If the value is not larger than the threshold, it's not regarded a mob. Retrieve
                            // the mob cell from the mob file, and write it back to the store file.
                            Cell mobCell = mobStore.resolve(c, false);
                            if (mobCell.getValueLength() != 0) {
                                // put the mob data back to the store file
                                CellUtil.setSequenceId(mobCell, c.getSequenceId());
                                writer.append(mobCell);
                                cellsCountCompactedFromMob++;
                                cellsSizeCompactedFromMob += mobCell.getValueLength();
                            } else {
                                // If the resolved mob value is empty, something may have gone wrong
                                // during retrieval; write the original cell to the store file and
                                // leave it to be handled by the next compaction.
                                writer.append(c);
                            }
                        }
                    } else {
                        LOG.warn("The value format of the KeyValue " + c + " is wrong, its length is less than " + Bytes.SIZEOF_INT);
                        writer.append(c);
                    }
                } else if (c.getValueLength() <= mobSizeThreshold) {
                    //If value size of a cell is not larger than the threshold, directly write to store file
                    writer.append(c);
                } else {
                    // If the value size of a cell is larger than the threshold, it's regarded as a mob,
                    // write this cell to a mob file, and write the path to the store file.
                    mobCells++;
                    // append the original keyValue in the mob file.
                    mobFileWriter.append(c);
                    Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
                    // write the cell whose value is the path of a mob file to the store file.
                    writer.append(reference);
                    cellsCountCompactedToMob++;
                    cellsSizeCompactedToMob += c.getValueLength();
                }
                int len = KeyValueUtil.length(c);
                ++progress.currentCompactedKVs;
                progress.totalCompactedSize += len;
                bytesWrittenProgressForShippedCall += len;
                if (LOG.isDebugEnabled()) {
                    bytesWrittenProgressForLog += len;
                }
                throughputController.control(compactionName, len);
                // check periodically to see if a system stop is requested
                if (closeCheckSizeLimit > 0) {
                    bytesWrittenProgressForCloseCheck += len;
                    if (bytesWrittenProgressForCloseCheck > closeCheckSizeLimit) {
                        bytesWrittenProgressForCloseCheck = 0;
                        if (!store.areWritesEnabled()) {
                            progress.cancel();
                            return false;
                        }
                    }
                }
                if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
                    ((ShipperListener) writer).beforeShipped();
                    kvs.shipped();
                    bytesWrittenProgressForShippedCall = 0;
                }
            }
            // logging at DEBUG level
            if (LOG.isDebugEnabled()) {
                if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
                    LOG.debug("Compaction progress: " + compactionName + " " + progress + String.format(", rate=%.2f kB/sec", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0)) + ", throughputController is " + throughputController);
                    lastMillis = now;
                    bytesWrittenProgressForLog = 0;
                }
            }
            cells.clear();
        } while (hasMore);
        finished = true;
    } catch (InterruptedException e) {
        progress.cancel();
        throw new InterruptedIOException("Interrupted while control throughput of compacting " + compactionName);
    } finally {
        throughputController.finish(compactionName);
        if (!finished && mobFileWriter != null) {
            abortWriter(mobFileWriter);
        }
        if (!finished && delFileWriter != null) {
            abortWriter(delFileWriter);
        }
    }
    if (delFileWriter != null) {
        if (deleteMarkersCount > 0) {
            // If the del file is not empty, commit it.
            // If the commit fails, the compaction is re-performed again.
            delFileWriter.appendMetadata(fd.maxSeqId, major, deleteMarkersCount);
            delFileWriter.close();
            mobStore.commitFile(delFileWriter.getPath(), path);
        } else {
            // If the del file is empty, delete it instead of committing.
            abortWriter(delFileWriter);
        }
    }
    if (mobFileWriter != null) {
        if (mobCells > 0) {
            // If the mob file is not empty, commit it.
            mobFileWriter.appendMetadata(fd.maxSeqId, major, mobCells);
            mobFileWriter.close();
            mobStore.commitFile(mobFileWriter.getPath(), path);
        } else {
            // If the mob file is empty, delete it instead of committing.
            abortWriter(mobFileWriter);
        }
    }
    mobStore.updateCellsCountCompactedFromMob(cellsCountCompactedFromMob);
    mobStore.updateCellsCountCompactedToMob(cellsCountCompactedToMob);
    mobStore.updateCellsSizeCompactedFromMob(cellsSizeCompactedFromMob);
    mobStore.updateCellsSizeCompactedToMob(cellsSizeCompactedToMob);
    progress.complete();
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) InterruptedIOException(java.io.InterruptedIOException) ArrayList(java.util.ArrayList) KeyValueScanner(org.apache.hadoop.hbase.regionserver.KeyValueScanner) IOException(java.io.IOException) Date(java.util.Date) Cell(org.apache.hadoop.hbase.Cell) ScannerContext(org.apache.hadoop.hbase.regionserver.ScannerContext) ShipperListener(org.apache.hadoop.hbase.regionserver.ShipperListener)
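The long branch chain in the middle of performCompaction is easier to see as a routing decision: every cell ends up in exactly one of the store file, the del file, or the mob file. The classifier below is an illustrative distillation of those branches, not HBase code; the Sink enum and route helper are invented for this sketch, and the mob-reference branch is simplified (the real code may resolve the reference and write the underlying mob value back).

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.mob.MobUtils;

final class MobRoutingSketch {
    enum Sink { STORE_FILE, DEL_FILE, MOB_FILE }

    static Sink route(Cell c, boolean major, boolean haveDelWriter,
                      boolean haveMobWriter, long mobSizeThreshold) {
        if (major && CellUtil.isDelete(c)) {
            // New delete markers go to the del file; already-tagged ones pass through.
            return (MobUtils.isMobReferenceCell(c) || !haveDelWriter)
                ? Sink.STORE_FILE : Sink.DEL_FILE;
        }
        if (!haveMobWriter || c.getTypeByte() != KeyValue.Type.Put.getCode()) {
            return Sink.STORE_FILE;    // no mob writer, or not a Put: keep it inline
        }
        if (MobUtils.isMobReferenceCell(c)) {
            // Simplified: the real code may resolve the reference and rewrite the value.
            return Sink.STORE_FILE;
        }
        // Plain Put: divert to the mob file only when the value exceeds the threshold.
        return c.getValueLength() <= mobSizeThreshold ? Sink.STORE_FILE : Sink.MOB_FILE;
    }
}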

Example 8 with StoreFileWriter

use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

the class DefaultMobStoreFlusher method performMobFlush.

/**
   * Flushes the cells in the mob store.
   * In the mob store, cells with PUT type may or may not have mob tags.
   * <ol>
   * <li>If a cell does not have a mob tag, which file it is flushed to depends on the
   * value length. If the length is larger than a threshold, the cell is flushed to a
   * mob file and a reference to the mob file is flushed to a store file in HBase.
   * Otherwise, the cell is flushed directly to a store file in HBase.</li>
   * <li>If a cell has a mob tag, its value is a mob file name; flush it directly
   * to a store file in HBase.</li>
   * </ol>
   * @param snapshot Memstore snapshot.
   * @param cacheFlushId Log cache flush sequence number.
   * @param scanner The scanner of memstore snapshot.
   * @param writer The store file writer.
   * @param status Task that represents the flush operation and may be updated with status.
   * @param throughputController A controller to avoid flushing too fast.
   * @throws IOException if an IO error occurs.
   */
protected void performMobFlush(MemStoreSnapshot snapshot, long cacheFlushId, InternalScanner scanner, StoreFileWriter writer, MonitoredTask status, ThroughputController throughputController) throws IOException {
    StoreFileWriter mobFileWriter = null;
    int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
    long mobCount = 0;
    long mobSize = 0;
    long time = snapshot.getTimeRangeTracker().getMax();
    mobFileWriter = mobStore.createWriterInTmp(new Date(time), snapshot.getCellsCount(), store.getFamily().getCompressionType(), store.getRegionInfo().getStartKey(), false);
    // the target path is {tableName}/.mob/{cfName}/mobFiles
    // the relative path is mobFiles
    byte[] fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
    List<Cell> cells = new ArrayList<>();
    boolean hasMore;
    String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
    boolean control = throughputController != null && !store.getRegionInfo().isSystemTable();
    if (control) {
        throughputController.start(flushName);
    }
    IOException ioe = null;
    try {
        do {
            hasMore = scanner.next(cells, scannerContext);
            if (!cells.isEmpty()) {
                for (Cell c : cells) {
                    // Small values, existing mob references, and non-Put cells are
                    // flushed straight to the store file on disk.
                    if (c.getValueLength() <= mobCellValueSizeThreshold || MobUtils.isMobReferenceCell(c) || c.getTypeByte() != KeyValue.Type.Put.getCode()) {
                        writer.append(c);
                    } else {
                        // append the original keyValue in the mob file.
                        mobFileWriter.append(c);
                        mobSize += c.getValueLength();
                        mobCount++;
                        // Build a reference cell with the mob ref tags: the key is the
                        // same, the value is the file name of the mob file.
                        Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
                        writer.append(reference);
                    }
                    int len = KeyValueUtil.length(c);
                    if (control) {
                        throughputController.control(flushName, len);
                    }
                }
                cells.clear();
            }
        } while (hasMore);
    } catch (InterruptedException e) {
        ioe = new InterruptedIOException("Interrupted while control throughput of flushing " + flushName);
        throw ioe;
    } catch (IOException e) {
        ioe = e;
        throw e;
    } finally {
        if (control) {
            throughputController.finish(flushName);
        }
        if (ioe != null) {
            mobFileWriter.close();
        }
    }
    if (mobCount > 0) {
        // commit the mob file from temp folder to target folder.
        // If the mob file is committed successfully but the store file is not,
        // the committed mob file will be handled by the sweep tool as an unused
        // file.
        status.setStatus("Flushing mob file " + store + ": appending metadata");
        mobFileWriter.appendMetadata(cacheFlushId, false, mobCount);
        status.setStatus("Flushing mob file " + store + ": closing flushed file");
        mobFileWriter.close();
        mobStore.commitFile(mobFileWriter.getPath(), targetPath);
        mobStore.updateMobFlushCount();
        mobStore.updateMobFlushedCellsCount(mobCount);
        mobStore.updateMobFlushedCellsSize(mobSize);
    } else {
        try {
            status.setStatus("Flushing mob file " + store + ": no mob cells, closing flushed file");
            mobFileWriter.close();
            // If the mob file is empty, delete it instead of committing.
            store.getFileSystem().delete(mobFileWriter.getPath(), true);
        } catch (IOException e) {
            LOG.error("Failed to delete the temp mob file", e);
        }
    }
}
Also used : StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) InterruptedIOException(java.io.InterruptedIOException) ArrayList(java.util.ArrayList) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) Date(java.util.Date) ScannerContext(org.apache.hadoop.hbase.regionserver.ScannerContext) Cell(org.apache.hadoop.hbase.Cell)
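Stripped of the throughput control and bookkeeping, the flush-time decision above is a single predicate: a cell is diverted to the mob file only when it is a large, plain Put that is not already a mob reference. A hedged restatement as a standalone helper (illustrative, not part of HBase):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.mob.MobUtils;

final class MobFlushDecisionSketch {
    // Mirrors the condition in performMobFlush, inverted: true means the cell
    // goes to the mob file and only a reference cell lands in the store file.
    static boolean goesToMobFile(Cell c, long mobCellValueSizeThreshold) {
        return c.getValueLength() > mobCellValueSizeThreshold
            && !MobUtils.isMobReferenceCell(c)
            && c.getTypeByte() == KeyValue.Type.Put.getCode();
    }
}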

Example 9 with StoreFileWriter

use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

the class MobUtils method createWriter.

/**
   * Creates a writer for the mob file in a temp directory.
   * @param conf The current configuration.
   * @param fs The current file system.
   * @param family The descriptor of the current column family.
   * @param path The path for a temp directory.
   * @param maxKeyCount The maximum key count.
   * @param compression The compression algorithm.
   * @param cacheConfig The current cache config.
   * @param cryptoContext The encryption context.
   * @param checksumType The checksum type.
   * @param bytesPerChecksum The bytes per checksum.
   * @param blocksize The HFile block size.
   * @param bloomType The bloom filter type.
   * @param isCompaction If the writer is used in compaction.
   * @return The writer for the mob file.
   * @throws IOException if an IO error occurs.
   */
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, HColumnDescriptor family, Path path, long maxKeyCount, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, ChecksumType checksumType, int bytesPerChecksum, int blocksize, BloomType bloomType, boolean isCompaction) throws IOException {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    final CacheConfig writerCacheConf;
    if (isCompaction) {
        writerCacheConf = new CacheConfig(cacheConfig);
        writerCacheConf.setCacheDataOnWrite(false);
    } else {
        writerCacheConf = cacheConfig;
    }
    HFileContext hFileContext = new HFileContextBuilder().withCompression(compression).withIncludesMvcc(true).withIncludesTags(true).withCompressTags(family.isCompressTags()).withChecksumType(checksumType).withBytesPerCheckSum(bytesPerChecksum).withBlockSize(blocksize).withHBaseCheckSum(true).withDataBlockEncoding(family.getDataBlockEncoding()).withEncryptionContext(cryptoContext).withCreateTime(EnvironmentEdgeManager.currentTime()).build();
    StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs).withFilePath(path).withComparator(CellComparator.COMPARATOR).withBloomType(bloomType).withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build();
    return w;
}
Also used : StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext)
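The same builder chain can be called outside of MobUtils when the MOB-specific defaults are not needed. Here is a minimal sketch using the same APIs as createWriter above; the file path, block size, and key count are placeholder values chosen for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

final class StoreFileWriterUsageSketch {
    static StoreFileWriter open(Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        HFileContext ctx = new HFileContextBuilder()
            .withIncludesMvcc(true)
            .withIncludesTags(true)
            .withBlockSize(64 * 1024)                        // placeholder block size
            .build();
        return new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs)
            .withFilePath(new Path("/tmp/example-hfile"))    // placeholder path
            .withComparator(CellComparator.COMPARATOR)
            .withBloomType(BloomType.NONE)
            .withMaxKeyCount(1000)                           // placeholder key count
            .withFileContext(ctx)
            .build();
    }
}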

Example 10 with StoreFileWriter

use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

the class PartitionedMobCompactor method compactDelFilesInBatch.

/**
   * Compacts the del files in a batch.
   * @param request The compaction request.
   * @param delFiles The del files.
   * @return The path of the new del file after merging.
   * @throws IOException if IO failure is encountered
   */
private Path compactDelFilesInBatch(PartitionedMobCompactionRequest request, List<StoreFile> delFiles) throws IOException {
    // create a scanner for the del files.
    StoreScanner scanner = createScanner(delFiles, ScanType.COMPACT_RETAIN_DELETES);
    StoreFileWriter writer = null;
    Path filePath = null;
    try {
        writer = MobUtils.createDelFileWriter(conf, fs, column, MobUtils.formatDate(new Date(request.selectionTime)), tempPath, Long.MAX_VALUE, column.getCompactionCompressionType(), HConstants.EMPTY_START_ROW, compactionCacheConfig, cryptoContext);
        filePath = writer.getPath();
        List<Cell> cells = new ArrayList<>();
        boolean hasMore;
        ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
        do {
            hasMore = scanner.next(cells, scannerContext);
            for (Cell cell : cells) {
                writer.append(cell);
            }
            cells.clear();
        } while (hasMore);
    } finally {
        scanner.close();
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) {
                LOG.error("Failed to close the writer of the file " + filePath, e);
            }
        }
    }
    // commit the new del file
    Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
    // archive the old del files
    try {
        MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles);
    } catch (IOException e) {
        LOG.error("Failed to archive the old del files " + delFiles, e);
    }
    return path;
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) ArrayList(java.util.ArrayList) IOException(java.io.IOException) StoreScanner(org.apache.hadoop.hbase.regionserver.StoreScanner) Cell(org.apache.hadoop.hbase.Cell) ScannerContext(org.apache.hadoop.hbase.regionserver.ScannerContext) Date(java.util.Date)
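All of the compaction and flush examples above share the same scanner-to-writer copy loop, batch-limited so a single wide row cannot exhaust the heap. Extracted into a standalone helper it looks like this; the helper is illustrative, not an HBase API.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

final class ScannerDrainSketch {
    static void copyAll(InternalScanner scanner, StoreFileWriter writer, int batchLimit)
        throws IOException {
        ScannerContext ctx = ScannerContext.newBuilder().setBatchLimit(batchLimit).build();
        List<Cell> cells = new ArrayList<>();
        boolean hasMore;
        do {
            // next() can return false while cells still holds buffered data,
            // hence the do/while shape used throughout these examples.
            hasMore = scanner.next(cells, ctx);
            for (Cell cell : cells) {
                writer.append(cell);
            }
            cells.clear();
        } while (hasMore);
    }
}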

Aggregations

StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter) 17
Path (org.apache.hadoop.fs.Path) 14
Cell (org.apache.hadoop.hbase.Cell) 7
KeyValue (org.apache.hadoop.hbase.KeyValue) 7
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext) 7
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) 7
ArrayList (java.util.ArrayList) 6
FileSystem (org.apache.hadoop.fs.FileSystem) 6
Test (org.junit.Test) 6
IOException (java.io.IOException) 5
ScannerContext (org.apache.hadoop.hbase.regionserver.ScannerContext) 4
InterruptedIOException (java.io.InterruptedIOException) 3
Date (java.util.Date) 3
StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile) 3
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig) 2
BloomType (org.apache.hadoop.hbase.regionserver.BloomType) 2
StoreScanner (org.apache.hadoop.hbase.regionserver.StoreScanner) 2
HashMap (java.util.HashMap) 1
Map (java.util.Map) 1
Random (java.util.Random) 1