
Example 21 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From the class TestMobCompactor, method verifyEncryption.

private boolean verifyEncryption(TableName tableName, String familyName) throws IOException {
    Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
    boolean hasFiles = false;
    if (fs.exists(mobDirPath)) {
        FileStatus[] files = fs.listStatus(mobDirPath);
        hasFiles = files != null && files.length > 0;
        Assert.assertTrue(hasFiles);
        Path path = files[0].getPath();
        CacheConfig cacheConf = new CacheConfig(conf);
        StoreFile sf = new StoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf, BloomType.NONE);
        HFile.Reader reader = sf.createReader().getHFileReader();
        byte[] encryptionKey = reader.getTrailer().getEncryptionKey();
        Assert.assertTrue(null != encryptionKey);
        Assert.assertTrue(reader.getFileContext().getEncryptionContext().getCipher().getName().equals(HConstants.CIPHER_AES));
    }
    return hasFiles;
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), HFile (org.apache.hadoop.hbase.io.hfile.HFile), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)
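
The cipher-name assertion above only holds if the MOB column family was created with AES encryption enabled. A minimal sketch of that setup follows; the table name, family name, and MOB threshold are illustrative, and the key-provider settings (hbase.crypto.keyprovider and the cluster master key) are assumed to be configured elsewhere in the test harness.

HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("testMobEncryption"));
HColumnDescriptor hcd = new HColumnDescriptor("family");
// Values larger than the threshold are written to MOB files under the mob family path
hcd.setMobEnabled(true);
hcd.setMobThreshold(100L);
// Cipher name compared against HConstants.CIPHER_AES in verifyEncryption()
hcd.setEncryptionType("AES");
desc.addFamily(hcd);
TEST_UTIL.getAdmin().createTable(desc);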

Example 22 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From the class TestVisibilityLabels, method testFlushedFileWithVisibilityTags.

@Test
public void testFlushedFileWithVisibilityTags() throws Exception {
    final byte[] qual2 = Bytes.toBytes("qual2");
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor col = new HColumnDescriptor(fam);
    desc.addFamily(col);
    TEST_UTIL.getAdmin().createTable(desc);
    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
        Put p1 = new Put(row1);
        p1.addColumn(fam, qual, value);
        p1.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        Put p2 = new Put(row1);
        p2.addColumn(fam, qual2, value);
        p2.setCellVisibility(new CellVisibility(SECRET));
        RowMutations rm = new RowMutations(row1);
        rm.add(p1);
        rm.add(p2);
        table.mutateRow(rm);
    }
    TEST_UTIL.getAdmin().flush(tableName);
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
    Store store = regions.get(0).getStore(fam);
    Collection<StoreFile> storefiles = store.getStorefiles();
    assertTrue(storefiles.size() > 0);
    for (StoreFile storeFile : storefiles) {
        assertTrue(storeFile.getReader().getHFileReader().getFileContext().isIncludesTags());
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), Store (org.apache.hadoop.hbase.regionserver.Store), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), RowMutations (org.apache.hadoop.hbase.client.RowMutations), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), Test (org.junit.Test)
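
As a follow-up sketch (not part of the original test), the visibility-tagged cells written above could be read back by attaching an Authorizations object to the Get. This assumes the CONFIDENTIAL and SECRET labels were previously defined and granted to the reading user via VisibilityClient, and uses org.apache.hadoop.hbase.client.Get, org.apache.hadoop.hbase.client.Result and org.apache.hadoop.hbase.security.visibility.Authorizations.

try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Get get = new Get(row1);
    // Supply both labels so both cells written by the RowMutations are visible
    get.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET));
    Result result = table.get(get);
    assertTrue(result.containsColumn(fam, qual));
    assertTrue(result.containsColumn(fam, qual2));
}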

Example 23 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From the class MergeTableRegionsProcedure, method mergeStoreFiles.

/**
   * Create reference file(s) of merging regions under the merges directory
   * @param env MasterProcedureEnv
   * @param regionFs region file system
   * @param mergedDir the temp directory of merged region
   * @throws IOException
   */
private void mergeStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs, final Path mergedDir) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();
    final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (String family : regionFs.getFamilies()) {
        final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                // Create reference file(s) of the region in mergedDir
                regionFs.mergeStoreFile(mergedRegionInfo, family, new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()), mergedDir);
            }
        }
    }
}
Also used: MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem), Configuration (org.apache.hadoop.conf.Configuration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), StoreFileInfo (org.apache.hadoop.hbase.regionserver.StoreFileInfo), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
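
A hedged sanity-check sketch, not part of MergeTableRegionsProcedure: once the merged region directory has been committed, every store file in it should be a reference back to one of the parent regions, which StoreFileInfo exposes via isReference(). Opening the region with FSUtils.getTableDir and HRegionFileSystem.openRegionFromFileSystem is an assumption about how one might inspect it read-only.

Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), getTableName());
HRegionFileSystem mergedFs = HRegionFileSystem.openRegionFromFileSystem(
    conf, mfs.getFileSystem(), tableDir, mergedRegionInfo, true /* read-only */);
for (String family : mergedFs.getFamilies()) {
    for (StoreFileInfo info : mergedFs.getStoreFiles(family)) {
        // Reference files point at store files of the regions being merged
        assert info.isReference() : "expected a reference file in family " + family;
    }
}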

Example 24 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From the class Compactor, method getFileDetails.

/**
   * Extracts some details about the files to compact that are commonly needed by compactors.
   * @param filesToCompact Files.
   * @param allFiles Whether all files are included for compaction
   * @return The result.
   */
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles) throws IOException {
    FileDetails fd = new FileDetails();
    long oldestHFileTimeStampToKeepMVCC = System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
    for (StoreFile file : filesToCompact) {
        if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
            // MVCC value to keep
            if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
                fd.minSeqIdToKeep = file.getMaxMemstoreTS();
            }
        }
        long seqNum = file.getMaxSequenceId();
        fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
        StoreFileReader r = file.getReader();
        if (r == null) {
            LOG.warn("Null reader for " + file.getPath());
            continue;
        }
        // NOTE: use getEntries rather than getFilterEntries when compacting; otherwise
        // under-sized blooms, or a user-initiated bloom type switch (e.g. from ROW to ROWCOL),
        // can cause the key count, and hence compaction progress, to be miscalculated.
        long keyCount = r.getEntries();
        fd.maxKeyCount += keyCount;
        // calculate the latest MVCC readpoint in any of the involved store files
        Map<byte[], byte[]> fileInfo = r.loadFileInfo();
        byte[] tmp = null;
        // SeqId number.
        if (r.isBulkLoaded()) {
            fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
        } else {
            tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
            if (tmp != null) {
                fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
            }
        }
        tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
        if (tmp != null) {
            fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
        }
        // If required, calculate the earliest put timestamp of all involved storefiles.
        // This is used to remove family delete marker during compaction.
        long earliestPutTs = 0;
        if (allFiles) {
            tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
            if (tmp == null) {
                // There's a file with no information, must be an old one
                // assume we have very old puts
                fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
            } else {
                earliestPutTs = Bytes.toLong(tmp);
                fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
            }
        }
        tmp = fileInfo.get(StoreFile.TIMERANGE_KEY);
        TimeRangeTracker trt = TimeRangeTracker.getTimeRangeTracker(tmp);
        fd.latestPutTs = trt == null ? HConstants.LATEST_TIMESTAMP : trt.getMax();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Compacting " + file + ", keycount=" + keyCount + ", bloomtype=" + r.getBloomFilterType().toString() + ", size=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1) + ", encoding=" + r.getHFileReader().getDataBlockEncoding() + ", seqNum=" + seqNum + (allFiles ? ", earliestPutTs=" + earliestPutTs : ""));
        }
    }
    return fd;
}
Also used: StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), TimeRangeTracker (org.apache.hadoop.hbase.regionserver.TimeRangeTracker), StoreFileReader (org.apache.hadoop.hbase.regionserver.StoreFileReader)
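
For clarity, the cutoff computed at the top of getFileDetails() is simply "now minus keepSeqIdPeriod days": only store files whose last modification predates that cutoff may have their memstore timestamps (MVCC read points) collapsed during an all-files compaction. Below is an equivalent illustrative sketch using java.util.concurrent.TimeUnit, where the five-day period is an assumed value of this.keepSeqIdPeriod.

// Equivalent to: System.currentTimeMillis() - (1000L * 60 * 60 * 24 * keepSeqIdPeriod)
long keepSeqIdPeriod = 5;  // days; assumed value of this.keepSeqIdPeriod
long oldestHFileTimeStampToKeepMVCC =
    System.currentTimeMillis() - TimeUnit.DAYS.toMillis(keepSeqIdPeriod);
// Same test as in the loop above: old enough means the MVCC values can be dropped
boolean canCollapseMvcc = file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC;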

Example 25 with StoreFile

Use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

From the class DateTieredCompactionPolicy, method getCompactBoundariesForMajor.

/**
   * Return a list of boundaries for multiple compaction outputs, in ascending order.
   */
private List<Long> getCompactBoundariesForMajor(Collection<StoreFile> filesToCompact, long now) {
    long minTimestamp = Long.MAX_VALUE;
    for (StoreFile file : filesToCompact) {
        minTimestamp = Math.min(minTimestamp, file.getMinimumTimestamp() == null ? Long.MAX_VALUE : file.getMinimumTimestamp());
    }
    List<Long> boundaries = new ArrayList<>();
    // Add startMillis of all windows between now and min timestamp
    for (CompactionWindow window = getIncomingWindow(now); window.compareToTimestamp(minTimestamp) > 0; window = window.nextEarlierWindow()) {
        boundaries.add(window.startMillis());
    }
    boundaries.add(Long.MIN_VALUE);
    Collections.reverse(boundaries);
    return boundaries;
}
Also used: StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), ArrayList (java.util.ArrayList)
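
To make the boundary construction concrete, here is an illustrative sketch (not HBase code) that performs the same walk with a fixed-size window in place of the policy's tiered CompactionWindow: window start times are collected going backwards from now until the earliest file timestamp is reached, Long.MIN_VALUE is appended as the catch-all lowest boundary, and the list is reversed into ascending order. The six-hour window and twenty-hour file age are assumed values; java.util.concurrent.TimeUnit is used for the millisecond arithmetic.

long windowMillis = TimeUnit.HOURS.toMillis(6);          // assumed base window size
long now = System.currentTimeMillis();
long minTimestamp = now - TimeUnit.HOURS.toMillis(20);   // earliest cell timestamp among the files
List<Long> boundaries = new ArrayList<>();
// Walk window start times back from the window containing "now"
for (long start = now - (now % windowMillis); start > minTimestamp; start -= windowMillis) {
    boundaries.add(start);
}
boundaries.add(Long.MIN_VALUE);
Collections.reverse(boundaries);
// boundaries now ascends from Long.MIN_VALUE up to the start of the current window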

Aggregations

StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile): 52
ArrayList (java.util.ArrayList): 22
Path (org.apache.hadoop.fs.Path): 15
Test (org.junit.Test): 13
IOException (java.io.IOException): 10
Store (org.apache.hadoop.hbase.regionserver.Store): 6
StripeInformationProvider (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider): 6
StoreFileReader (org.apache.hadoop.hbase.regionserver.StoreFileReader): 5
ImmutableList (com.google.common.collect.ImmutableList): 4
Configuration (org.apache.hadoop.conf.Configuration): 4
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 4
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 4
Put (org.apache.hadoop.hbase.client.Put): 4
StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner): 4
FileStatus (org.apache.hadoop.fs.FileStatus): 3
Cell (org.apache.hadoop.hbase.Cell): 3
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 3
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 3
ConcatenatedLists (org.apache.hadoop.hbase.util.ConcatenatedLists): 3
FileNotFoundException (java.io.FileNotFoundException): 2