Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The class ExpiredMobFileCleaner, method cleanExpiredMobFiles.
/**
 * Cleans up MOB files that have expired and whose column family has min versions set to 0.
 * A MOB file is regarded as expired when the latest timestamp of its cells is older than the
 * TTL of the column family; this cleaner deletes such files.
 * Concurrency note: suppose the cells in a MOB file M0 expire at time T0. A scan started
 * before T0 can see those cells and may still be running after T0. If the cleaner runs at a
 * later time T1, M0 is archived rather than deleted outright, so the in-flight scan can still
 * read it from the archive directory.
 * @param tableName The current table name.
 * @param family The current column family.
 */
public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) throws IOException {
  Configuration conf = getConf();
  TableName tn = TableName.valueOf(tableName);
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Cleaning the expired MOB files of " + family.getNameAsString() + " in " + tableName);
  // disable the block cache.
  Configuration copyOfConf = new Configuration(conf);
  copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
  CacheConfig cacheConfig = new CacheConfig(copyOfConf);
  MobUtils.cleanExpiredMobFiles(fs, conf, tn, family, cacheConfig,
      EnvironmentEdgeManager.currentTime());
}
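The CacheConfig detail worth noting here is that the cleaner reads MOB files through a CacheConfig built from a copied configuration whose block cache size is forced to zero, so a one-off maintenance pass does not pollute the RegionServer's block cache. Below is a minimal, self-contained sketch of that pattern on its own; the class name is illustrative and the final println is only there to show the resulting cache settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class NoBlockCacheConfigSketch {
  public static void main(String[] args) {
    // Start from the normal cluster configuration.
    Configuration conf = HBaseConfiguration.create();
    // Copy it so the zero cache size does not leak into other components sharing conf.
    Configuration copyOfConf = new Configuration(conf);
    copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
    // Readers opened with this CacheConfig will not populate the block cache.
    CacheConfig cacheConfig = new CacheConfig(copyOfConf);
    System.out.println(cacheConfig);
  }
}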
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The class CompressionTest, method doSmokeTest.
public static void doSmokeTest(FileSystem fs, Path path, String codec) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context = new HFileContextBuilder()
      .withCompression(HFileWriterImpl.compressionByName(codec)).build();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path).withFileContext(context).create();
  // Write any old Cell...
  final byte[] rowKey = Bytes.toBytes("compressiontestkey");
  Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval"));
  writer.append(c);
  writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
  writer.close();
  Cell cc = null;
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, true);
    // Position at the start of the file.
    scanner.seekTo();
    // The scanner does not fully support Cells yet; read the current cell directly for now.
    cc = scanner.getCell();
    if (CellComparator.COMPARATOR.compareRows(c, cc) != 0) {
      throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
    }
  } finally {
    reader.close();
  }
}
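Because doSmokeTest is public and static, it can be driven directly, for instance to check that a codec's native libraries are present on a node before enabling that compression on a column family. Here is a hedged sketch of such a call; the local path and the chosen codec are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.CompressionTest;

public class CompressionSmokeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Use the local filesystem so the check does not depend on a running HDFS.
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/hbase-compression-smoke.hfile"); // illustrative path
    // "gz" ships with Hadoop; codecs such as "snappy" need native libraries on the node.
    CompressionTest.doSmokeTest(fs, path, "gz");
    System.out.println("Codec gz: OK");
  }
}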
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The class MergeTableRegionsProcedure, method mergeStoreFiles.
/**
 * Create reference file(s) of merging regions under the merges directory.
 * @param env MasterProcedureEnv
 * @param regionFs region file system
 * @param mergedDir the temp directory of the merged region
 * @throws IOException
 */
private void mergeStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs,
    final Path mergedDir) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Configuration conf = env.getMasterConfiguration();
  final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
  for (String family : regionFs.getFamilies()) {
    final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
    final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
    if (storeFiles != null && storeFiles.size() > 0) {
      final CacheConfig cacheConf = new CacheConfig(conf, hcd);
      for (StoreFileInfo storeFileInfo : storeFiles) {
        // Create reference file(s) of the region in mergedDir
        regionFs.mergeStoreFile(mergedRegionInfo, family,
            new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf,
                hcd.getBloomFilterType()),
            mergedDir);
      }
    }
  }
}
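Unlike the previous examples, this one builds the CacheConfig from both the configuration and the column family descriptor, so per-family settings such as a disabled block cache are honored when the store files of the merging regions are opened. A small sketch of that constructor in isolation follows; the family name and the disabled-cache setting are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class FamilyCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A hypothetical family that opts out of caching data blocks on read.
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setBlockCacheEnabled(false);
    // CacheConfig(conf, hcd) combines the global settings with the family's own flags.
    CacheConfig cacheConf = new CacheConfig(conf, hcd);
    System.out.println(cacheConf);
  }
}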
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The class LoadIncrementalHFiles, method groupOrSplit.
/**
 * Attempts to assign the given load queue item to its target region group.
 * If the hfile's key range no longer fits into a single region, physically splits the hfile so
 * that the new bottom half fits, and returns the LoadQueueItems corresponding to the resulting
 * hfiles.
 *
 * Protected for testing.
 * @throws IOException if an IO failure is encountered
 */
protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups,
    final LoadQueueItem item, final Table table,
    final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
  final Path hfilePath = item.hfilePath;
  // fs is the source filesystem
  if (fs == null) {
    fs = hfilePath.getFileSystem(getConf());
  }
  HFile.Reader hfr = null;
  try {
    hfr = HFile.createReader(fs, hfilePath, new CacheConfig(getConf()), getConf());
  } catch (FileNotFoundException fnfe) {
    LOG.debug("encountered", fnfe);
    return new Pair<>(null, hfilePath.getName());
  }
  final byte[] first, last;
  try {
    hfr.loadFileInfo();
    first = hfr.getFirstRowKey();
    last = hfr.getLastRowKey();
  } finally {
    hfr.close();
  }
  LOG.info("Trying to load hfile=" + hfilePath + " first=" + Bytes.toStringBinary(first)
      + " last=" + Bytes.toStringBinary(last));
  if (first == null || last == null) {
    assert first == null && last == null;
    // TODO what if this is due to a bad HFile?
    LOG.info("hfile " + hfilePath + " has no entries, skipping");
    return null;
  }
  if (Bytes.compareTo(first, last) > 0) {
    throw new IllegalArgumentException(
        "Invalid range: " + Bytes.toStringBinary(first) + " > " + Bytes.toStringBinary(last));
  }
  int idx = Arrays.binarySearch(startEndKeys.getFirst(), first, Bytes.BYTES_COMPARATOR);
  if (idx < 0) {
    // Not an exact match on a region start key: binarySearch returned -(insertion point) - 1.
    // Convert that into the index of the region whose range would contain the first key.
    idx = -(idx + 1) - 1;
  }
  final int indexForCallable = idx;
  /**
   * We consider there to be a region hole in the following cases:
   * 1) idx < 0, i.e. the first region's info is missing;
   * 2) the end key of a region is not equal to the start key of the next region;
   * 3) the end key of the last region is not empty.
   */
  if (indexForCallable < 0) {
    throw new IOException("The first region info for table " + table.getName()
        + " can't be found in hbase:meta. Please use hbck tool to fix it first.");
  } else if ((indexForCallable == startEndKeys.getFirst().length - 1)
      && !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
    throw new IOException("The last region info for table " + table.getName()
        + " can't be found in hbase:meta. Please use hbck tool to fix it first.");
  } else if (indexForCallable + 1 < startEndKeys.getFirst().length
      && !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
          startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {
    throw new IOException("The endkey of one region for table " + table.getName()
        + " is not equal to the startkey of the next region in hbase:meta."
        + " Please use hbck tool to fix it first.");
  }
  boolean lastKeyInRange = Bytes.compareTo(last, startEndKeys.getSecond()[idx]) < 0
      || Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY);
  if (!lastKeyInRange) {
    List<LoadQueueItem> lqis = splitStoreFile(item, table,
        startEndKeys.getFirst()[indexForCallable], startEndKeys.getSecond()[indexForCallable]);
    return new Pair<>(lqis, null);
  }
  // Group the item with the region whose start key range contains it.
  regionGroups.put(ByteBuffer.wrap(startEndKeys.getFirst()[idx]), item);
  return null;
}
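The region lookup above hinges on Arrays.binarySearch over the sorted region start keys: an exact match returns the region index directly, while a miss returns -(insertion point) - 1, which the code converts into the index of the region whose range contains the hfile's first key. The following self-contained sketch, with made-up keys, walks through that arithmetic.

import java.util.Arrays;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionIndexLookupSketch {
  public static void main(String[] args) {
    // Sorted region start keys "", "b", "m" describe the regions [,b), [b,m), [m,).
    byte[][] startKeys = new byte[][] {
        new byte[0], Bytes.toBytes("b"), Bytes.toBytes("m") };
    byte[] firstRowKey = Bytes.toBytes("f"); // falls inside [b, m)
    int idx = Arrays.binarySearch(startKeys, firstRowKey, Bytes.BYTES_COMPARATOR);
    if (idx < 0) {
      // binarySearch returned -(insertion point) - 1; step back one slot to get the
      // region whose start key is the greatest key <= firstRowKey.
      idx = -(idx + 1) - 1;
    }
    System.out.println("Row 'f' belongs to region index " + idx); // prints 1
  }
}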
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The class LoadIncrementalHFiles, method copyHFileHalf.
/**
 * Copy half of an HFile into a new HFile.
 */
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
    Reference reference, HColumnDescriptor familyDescriptor) throws IOException {
  FileSystem fs = inFile.getFileSystem(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HalfStoreFileReader halfReader = null;
  StoreFileWriter halfWriter = null;
  try {
    halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf);
    Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
    int blocksize = familyDescriptor.getBlocksize();
    Algorithm compression = familyDescriptor.getCompressionType();
    BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
    HFileContext hFileContext = new HFileContextBuilder()
        .withCompression(compression)
        .withChecksumType(HStore.getChecksumType(conf))
        .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
        .withBlockSize(blocksize)
        .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
        .withIncludesTags(true)
        .build();
    halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFilePath(outFile)
        .withBloomType(bloomFilterType)
        .withFileContext(hFileContext)
        .build();
    HFileScanner scanner = halfReader.getScanner(false, false, false);
    scanner.seekTo();
    do {
      halfWriter.append(scanner.getCell());
    } while (scanner.next());
    for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
      if (shouldCopyHFileMetaKey(entry.getKey())) {
        halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
      }
    }
  } finally {
    if (halfWriter != null) {
      halfWriter.close();
    }
    if (halfReader != null) {
      halfReader.close(cacheConf.shouldEvictOnClose());
    }
  }
}
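On the read side, the heavy lifting is done by HalfStoreFileReader, which exposes only the half of the HFile selected by the Reference. The sketch below opens the top half (rows at or above a split key) of an existing HFile, using only the calls shown above plus Reference.createTopReference; the file path and split key are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class HalfReaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.getLocal(conf);
    Path inFile = new Path("/tmp/some-existing.hfile"); // illustrative path
    byte[] splitKey = Bytes.toBytes("row-5000");        // illustrative split point
    // A top reference exposes only the cells whose row is >= splitKey.
    Reference top = Reference.createTopReference(splitKey);
    HalfStoreFileReader halfReader =
        new HalfStoreFileReader(fs, inFile, new CacheConfig(conf), top, conf);
    try {
      halfReader.loadFileInfo();
      HFileScanner scanner = halfReader.getScanner(false, false, false);
      if (scanner.seekTo()) {
        System.out.println("First cell of the top half: " + scanner.getCell());
      }
    } finally {
      halfReader.close(false);
    }
  }
}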