Use of org.apache.hadoop.hbase.regionserver.StoreFile in the Apache HBase project.
From the class TestMobCompactor, method verifyEncryption.
private boolean verifyEncryption(TableName tableName, String familyName) throws IOException {
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, familyName);
  boolean hasFiles = false;
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = fs.listStatus(mobDirPath);
    hasFiles = files != null && files.length > 0;
    Assert.assertTrue(hasFiles);
    Path path = files[0].getPath();
    CacheConfig cacheConf = new CacheConfig(conf);
    StoreFile sf = new StoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf, BloomType.NONE);
    HFile.Reader reader = sf.createReader().getHFileReader();
    byte[] encryptionKey = reader.getTrailer().getEncryptionKey();
    Assert.assertTrue(null != encryptionKey);
    Assert.assertTrue(reader.getFileContext().getEncryptionContext().getCipher().getName()
        .equals(HConstants.CIPHER_AES));
  }
  return hasFiles;
}
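The same pattern can be used from a test to inspect other HFile-level metadata of a store file. A minimal sketch, reusing the conf and fs fields seen above; someHFilePath is a hypothetical path to an existing HFile, and closeReader is assumed to be available on StoreFile in this version:

  // Open a store file read-only and look at a few pieces of metadata.
  // someHFilePath is a placeholder, not a path from the original test.
  CacheConfig cacheConf = new CacheConfig(conf);
  StoreFile sf = new StoreFile(fs, someHFilePath, conf, cacheConf, BloomType.NONE);
  StoreFileReader reader = sf.createReader();
  long cellCount = reader.getEntries();              // number of cells in the file
  long maxSeqId = sf.getMaxSequenceId();             // highest sequence id in the file
  boolean hasTags = reader.getHFileReader().getFileContext().isIncludesTags();
  sf.closeReader(true);                              // evict cached blocks on close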
Use of org.apache.hadoop.hbase.regionserver.StoreFile in the Apache HBase project.
From the class TestVisibilityLabels, method testFlushedFileWithVisibilityTags.
@Test
public void testFlushedFileWithVisibilityTags() throws Exception {
  final byte[] qual2 = Bytes.toBytes("qual2");
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor col = new HColumnDescriptor(fam);
  desc.addFamily(col);
  TEST_UTIL.getAdmin().createTable(desc);
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Put p1 = new Put(row1);
    p1.addColumn(fam, qual, value);
    p1.setCellVisibility(new CellVisibility(CONFIDENTIAL));
    Put p2 = new Put(row1);
    p2.addColumn(fam, qual2, value);
    p2.setCellVisibility(new CellVisibility(SECRET));
    RowMutations rm = new RowMutations(row1);
    rm.add(p1);
    rm.add(p2);
    table.mutateRow(rm);
  }
  TEST_UTIL.getAdmin().flush(tableName);
  List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
  Store store = regions.get(0).getStore(fam);
  Collection<StoreFile> storefiles = store.getStorefiles();
  assertTrue(storefiles.size() > 0);
  for (StoreFile storeFile : storefiles) {
    assertTrue(storeFile.getReader().getHFileReader().getFileContext().isIncludesTags());
  }
}
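As a follow-up to the store-file check, such a test could also read the row back with matching authorizations. This is a minimal sketch and not part of the original test; it reuses the row1, fam and qual constants and the CONFIDENTIAL and SECRET labels from the test class:

  // Read the row back with both labels; both cells should be visible.
  try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET));
    Result result = table.get(get);
    assertTrue(result.containsColumn(fam, qual));
    assertTrue(result.containsColumn(fam, Bytes.toBytes("qual2")));
  }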
Use of org.apache.hadoop.hbase.regionserver.StoreFile in the Apache HBase project.
From the class MergeTableRegionsProcedure, method mergeStoreFiles.
/**
 * Creates reference file(s) of the merging regions under the merges directory.
 * @param env MasterProcedureEnv
 * @param regionFs region file system
 * @param mergedDir the temp directory of the merged region
 * @throws IOException
 */
private void mergeStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs,
    final Path mergedDir) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Configuration conf = env.getMasterConfiguration();
  final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
  for (String family : regionFs.getFamilies()) {
    final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
    final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
    if (storeFiles != null && storeFiles.size() > 0) {
      final CacheConfig cacheConf = new CacheConfig(conf, hcd);
      for (StoreFileInfo storeFileInfo : storeFiles) {
        // Create reference file(s) of the region in mergedDir
        regionFs.mergeStoreFile(mergedRegionInfo, family,
            new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()),
            mergedDir);
      }
    }
  }
}
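For context, a rough sketch of how this helper is typically driven from the surrounding merge procedure. This is an assumption-laden sketch: openRegionFromFileSystem, createMergesDir, getMergesDir and commitMergedRegion are recalled from HRegionFileSystem and may differ between HBase versions, and tableDir is a placeholder:

  // Hedged sketch of the calling sequence around mergeStoreFiles.
  HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
      env.getMasterConfiguration(), mfs.getFileSystem(), tableDir, regionsToMerge[0], false);
  regionFs.createMergesDir();                              // temporary merges directory
  mergeStoreFiles(env, regionFs, regionFs.getMergesDir()); // write reference files
  regionFs.commitMergedRegion(mergedRegionInfo);           // move the merged region into place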
Use of org.apache.hadoop.hbase.regionserver.StoreFile in the Apache HBase project.
From the class Compactor, method getFileDetails.
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 * @param filesToCompact Files.
 * @param allFiles Whether all files are included for compaction
 * @return The result.
 */
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
    throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimeStampToKeepMVCC =
      System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
  for (StoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemstoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFileReader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries when compacting instead of getFilterEntries; otherwise under-sized
    // blooms, or a bloom type switched by the user (e.g. from ROW to ROWCOL), can cause
    // progress to be miscalculated.
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = null;
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    } else {
      tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    tmp = fileInfo.get(StoreFile.TIMERANGE_KEY);
    TimeRangeTracker trt = TimeRangeTracker.getTimeRangeTracker(tmp);
    fd.latestPutTs = trt == null ? HConstants.LATEST_TIMESTAMP : trt.getMax();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file + ", keycount=" + keyCount
          + ", bloomtype=" + r.getBloomFilterType().toString()
          + ", size=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1)
          + ", encoding=" + r.getHFileReader().getDataBlockEncoding()
          + ", seqNum=" + seqNum
          + (allFiles ? ", earliestPutTs=" + earliestPutTs : ""));
    }
  }
  return fd;
}
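This helper is typically invoked at the start of a compaction, roughly the way DefaultCompactor consumes a CompactionRequest. A minimal sketch; the logging line is illustrative and not from the original:

  // Gather file details once, then use them to size writers and scanners.
  FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles());
  LOG.info("Compacting " + request.getFiles().size() + " file(s): maxKeyCount=" + fd.maxKeyCount
      + ", maxSeqId=" + fd.maxSeqId + ", maxMVCCReadpoint=" + fd.maxMVCCReadpoint);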
Use of org.apache.hadoop.hbase.regionserver.StoreFile in the Apache HBase project.
From the class DateTieredCompactionPolicy, method getCompactBoundariesForMajor.
/**
 * Returns a list of boundaries for multiple compaction outputs, in ascending order.
 */
private List<Long> getCompactBoundariesForMajor(Collection<StoreFile> filesToCompact, long now) {
  long minTimestamp = Long.MAX_VALUE;
  for (StoreFile file : filesToCompact) {
    minTimestamp = Math.min(minTimestamp,
        file.getMinimumTimestamp() == null ? Long.MAX_VALUE : file.getMinimumTimestamp());
  }
  List<Long> boundaries = new ArrayList<>();
  // Add startMillis of all windows between now and min timestamp
  for (CompactionWindow window = getIncomingWindow(now);
       window.compareToTimestamp(minTimestamp) > 0;
       window = window.nextEarlierWindow()) {
    boundaries.add(window.startMillis());
  }
  boundaries.add(Long.MIN_VALUE);
  Collections.reverse(boundaries);
  return boundaries;
}
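For illustration, with fixed one-hour windows and an oldest cell about two hours old, the returned list has the shape sketched below. This is a simplified sketch using plain arithmetic rather than the real CompactionWindow classes:

  // Simplified illustration of the boundary list built above.
  long now = System.currentTimeMillis();
  long hour = 60 * 60 * 1000L;
  long incomingStart = (now / hour) * hour;       // start of the incoming window
  List<Long> boundaries = new ArrayList<>();
  boundaries.add(Long.MIN_VALUE);                 // catch-all for anything older
  boundaries.add(incomingStart - 2 * hour);       // window covering the oldest cell
  boundaries.add(incomingStart - hour);
  boundaries.add(incomingStart);                  // incoming window start
  // Each adjacent pair of boundaries becomes one compaction output file
  // in the major compaction.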