Use of org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition in project hbase by apache.
In the class PartitionedMobCompactor, the method compactMobFiles:
/**
* Compacts the selected small mob files and all the del files.
* @param request The compaction request.
* @return The paths of new mob files after compactions.
* @throws IOException if IO failure is encountered
*/
protected List<Path> compactMobFiles(final PartitionedMobCompactionRequest request) throws IOException {
  Collection<CompactionPartition> partitions = request.compactionPartitions;
  if (partitions == null || partitions.isEmpty()) {
    LOG.info("No partitions of mob files");
    return Collections.emptyList();
  }
  List<Path> paths = new ArrayList<>();
  final Connection c = ConnectionFactory.createConnection(conf);
  final Table table = c.getTable(tableName);
  try {
    Map<CompactionPartitionId, Future<List<Path>>> results = new HashMap<>();
    // compact the mob files by partitions in parallel.
    for (final CompactionPartition partition : partitions) {
      // To build the list of delFiles for one partition, search the delPartitions and collect
      // all the delFiles that apply to it. One optimization: if there is no del file at all,
      // we do not need to come up with startKey/endKey.
      List<StoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
      results.put(partition.getPartitionId(), pool.submit(new Callable<List<Path>>() {
        @Override
        public List<Path> call() throws Exception {
          LOG.info("Compacting mob files for partition " + partition.getPartitionId());
          return compactMobFilePartition(request, partition, delFiles, c, table);
        }
      }));
    }
    // wait for all partition compactions to finish and collect the new mob file paths.
    List<CompactionPartitionId> failedPartitions = new ArrayList<>();
    for (Entry<CompactionPartitionId, Future<List<Path>>> result : results.entrySet()) {
      try {
        paths.addAll(result.getValue().get());
      } catch (Exception e) {
        // just log the error
        LOG.error("Failed to compact the partition " + result.getKey(), e);
        failedPartitions.add(result.getKey());
      }
    }
    if (!failedPartitions.isEmpty()) {
      // if any partition fails in the compaction, directly throw an exception.
      throw new IOException("Failed to compact the partitions " + failedPartitions);
    }
  } finally {
    // close both the table and the connection created above, otherwise the connection leaks.
    try {
      table.close();
    } catch (IOException e) {
      LOG.error("Failed to close the Table", e);
    }
    try {
      c.close();
    } catch (IOException e) {
      LOG.error("Failed to close the Connection", e);
    }
  }
  return paths;
}
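The method above fans the work out as one Future per partition and only decides whether the compaction as a whole succeeded after every result has been collected. The following standalone sketch shows that submit-then-collect pattern in isolation; it uses plain java.util.concurrent, made-up partition names, and a stand-in for compactMobFilePartition, so none of it is HBase API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelPartitionSketch {
  public static void main(String[] args) throws Exception {
    List<String> partitions = Arrays.asList("p0", "p1", "p2"); // hypothetical partition ids
    ExecutorService pool = Executors.newFixedThreadPool(2);
    Map<String, Future<List<String>>> results = new HashMap<>();
    // Submit one task per partition, keyed by its id.
    for (String partition : partitions) {
      results.put(partition, pool.submit(() -> {
        // Stand-in for compactMobFilePartition(...): return the paths it produced.
        return Arrays.asList(partition + "/file-0");
      }));
    }
    // Collect the results; remember which partitions failed instead of aborting early.
    List<String> paths = new ArrayList<>();
    List<String> failed = new ArrayList<>();
    for (Map.Entry<String, Future<List<String>>> e : results.entrySet()) {
      try {
        paths.addAll(e.getValue().get());
      } catch (Exception ex) {
        failed.add(e.getKey());
      }
    }
    pool.shutdown();
    if (!failed.isEmpty()) {
      // A single failed partition fails the whole batch, as in compactMobFiles above.
      throw new IllegalStateException("Failed partitions: " + failed);
    }
    System.out.println(paths);
  }
}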
Use of org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition in project hbase by apache.
In the class TestPartitionedMobCompactor, the method testSelectFiles:
/**
* Tests the selectFiles() method.
* @param tableName the table name
* @param type the expected compaction type
* @param isForceAllFiles whether all the mob files are selected
* @param expected the expected start keys
*/
private void testSelectFiles(String tableName, final CompactionType type, final boolean isForceAllFiles, final List<String> expected) throws IOException {
  PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, TableName.valueOf(tableName), hcd, pool) {
    @Override
    public List<Path> compact(List<FileStatus> files, boolean isForceAllFiles) throws IOException {
      if (files == null || files.isEmpty()) {
        return null;
      }
      PartitionedMobCompactionRequest request = select(files, isForceAllFiles);
      // Make sure that when there are no del files, there is no startKey/endKey for the partitions.
      if (request.getDelPartitions().size() == 0) {
        for (CompactionPartition p : request.getCompactionPartitions()) {
          Assert.assertTrue(p.getStartKey() == null);
          Assert.assertTrue(p.getEndKey() == null);
        }
      }
      // Make sure that the CompactionDelPartitions do not overlap.
      CompactionDelPartition prevDelP = null;
      for (CompactionDelPartition delP : request.getDelPartitions()) {
        Assert.assertTrue(Bytes.compareTo(delP.getId().getStartKey(), delP.getId().getEndKey()) <= 0);
        if (prevDelP != null) {
          Assert.assertTrue(Bytes.compareTo(prevDelP.getId().getEndKey(), delP.getId().getStartKey()) < 0);
        }
        // remember the current del partition so the next iteration can check overlap against it.
        prevDelP = delP;
      }
      // For each partition, check the del files that were selected for it.
      for (CompactionPartition partition : request.getCompactionPartitions()) {
        List<StoreFile> delFiles = getListOfDelFilesForPartition(partition, request.getDelPartitions());
        if (!request.getDelPartitions().isEmpty()) {
          // Skip partitions whose key range does not intersect the overall del-partition key range.
          if (!((Bytes.compareTo(request.getDelPartitions().get(0).getId().getStartKey(),
              partition.getEndKey()) > 0)
              || (Bytes.compareTo(request.getDelPartitions().get(request.getDelPartitions().size() - 1)
                  .getId().getEndKey(), partition.getStartKey()) < 0))) {
            if (delFiles.size() > 0) {
              Assert.assertTrue(Bytes.compareTo(partition.getStartKey(), delFiles.get(0).getFirstKey().getRowArray()) >= 0);
              Assert.assertTrue(Bytes.compareTo(partition.getEndKey(), delFiles.get(delFiles.size() - 1).getLastKey().getRowArray()) <= 0);
            }
          }
        }
      }
      // assert the compaction type
      Assert.assertEquals(type, request.type);
      // assert that the right partitions were selected
      compareCompactedPartitions(expected, request.compactionPartitions);
      // assert that the right del files were selected
      compareDelFiles(request.getDelPartitions());
      return null;
    }
  };
  compactor.compact(allFiles, isForceAllFiles);
}
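The long condition inside the loop above is an interval-intersection test: del files can only apply to a partition whose key range overlaps theirs. Here is a minimal sketch of that test on its own, using java.util.Arrays.compareUnsigned (Java 9+) as a stand-in for Bytes.compareTo; the class name and sample keys are illustrative, not HBase API.

import java.util.Arrays;

public class KeyRangeOverlapSketch {
  // Lexicographic, unsigned byte comparison, which is what Bytes.compareTo does.
  static int compare(byte[] a, byte[] b) {
    return Arrays.compareUnsigned(a, b);
  }

  // True when the del range [delStart, delEnd] and the partition range [partStart, partEnd] intersect.
  static boolean overlaps(byte[] delStart, byte[] delEnd, byte[] partStart, byte[] partEnd) {
    return !(compare(delStart, partEnd) > 0 || compare(delEnd, partStart) < 0);
  }

  public static void main(String[] args) {
    byte[] a = "a".getBytes(), d = "d".getBytes(), f = "f".getBytes(), k = "k".getBytes();
    System.out.println(overlaps(a, d, f, k)); // false: the del range ends before the partition starts
    System.out.println(overlaps(a, f, d, k)); // true: the two ranges intersect on [d, f]
  }
}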
Use of org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition in project hbase by apache.
In the class TestPartitionedMobCompactor, the method testCompactFilesWithoutDelFile:
@Test
public void testCompactFilesWithoutDelFile() throws Exception {
  String tableName = "testCompactFilesWithoutDelFile";
  resetConf();
  init(tableName);
  createMobFile(basePath);
  listFiles();
  PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, TableName.valueOf(tableName), hcd, pool) {
    @Override
    public List<Path> compact(List<FileStatus> files, boolean isForceAllFiles) throws IOException {
      if (files == null || files.isEmpty()) {
        return null;
      }
      PartitionedMobCompactionRequest request = select(files, isForceAllFiles);
      // Make sure that there are no del partitions.
      Assert.assertTrue(request.getDelPartitions().size() == 0);
      // Make sure that, without del files, there is no startKey/endKey for any partition.
      for (CompactionPartition p : request.getCompactionPartitions()) {
        Assert.assertTrue(p.getStartKey() == null);
        Assert.assertTrue(p.getEndKey() == null);
      }
      return null;
    }
  };
  compactor.compact(allFiles, true);
}
Use of org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition in project hbase by apache.
In the class PartitionedMobCompactor, the method select:
/**
* Selects the mob/del files to be compacted.
* Iterates the candidates to find out all the del files and small mob files.
* @param candidates All the candidates.
* @param allFiles Whether to add all mob files into the compaction.
* @return A compaction request.
* @throws IOException if IO failure is encountered
*/
protected PartitionedMobCompactionRequest select(List<FileStatus> candidates, boolean allFiles) throws IOException {
  final Map<CompactionPartitionId, CompactionPartition> filesToCompact = new HashMap<>();
  final CompactionPartitionId id = new CompactionPartitionId();
  final NavigableMap<CompactionDelPartitionId, CompactionDelPartition> delFilesToCompact = new TreeMap<>();
  final CompactionDelPartitionId delId = new CompactionDelPartitionId();
  final ArrayList<CompactionDelPartition> allDelPartitions = new ArrayList<>();
  int selectedFileCount = 0;
  int irrelevantFileCount = 0;
  int totalDelFiles = 0;
  MobCompactPartitionPolicy policy = column.getMobCompactPartitionPolicy();
  Calendar calendar = Calendar.getInstance();
  Date currentDate = new Date();
  Date firstDayOfCurrentMonth = null;
  Date firstDayOfCurrentWeek = null;
  if (policy == MobCompactPartitionPolicy.MONTHLY) {
    firstDayOfCurrentMonth = MobUtils.getFirstDayOfMonth(calendar, currentDate);
    firstDayOfCurrentWeek = MobUtils.getFirstDayOfWeek(calendar, currentDate);
  } else if (policy == MobCompactPartitionPolicy.WEEKLY) {
    firstDayOfCurrentWeek = MobUtils.getFirstDayOfWeek(calendar, currentDate);
  }
  // First check whether there are any del files at all. If there are, each partition needs to
  // read its startKey and endKey from its files; if there are none, that read can be skipped,
  // which is an optimization.
  boolean withDelFiles = false;
  for (FileStatus file : candidates) {
    if (!file.isFile()) {
      continue;
    }
    // group the del files and small files.
    FileStatus linkedFile = file;
    if (HFileLink.isHFileLink(file.getPath())) {
      HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
      linkedFile = getLinkedFileStatus(link);
      if (linkedFile == null) {
        continue;
      }
    }
    if (StoreFileInfo.isDelFile(linkedFile.getPath())) {
      withDelFiles = true;
      break;
    }
  }
  for (FileStatus file : candidates) {
    if (!file.isFile()) {
      irrelevantFileCount++;
      continue;
    }
    // group the del files and small files.
    FileStatus linkedFile = file;
    if (HFileLink.isHFileLink(file.getPath())) {
      HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
      linkedFile = getLinkedFileStatus(link);
      if (linkedFile == null) {
        // If the linked file cannot be found, count it as an irrelevant file.
        irrelevantFileCount++;
        continue;
      }
    }
    if (withDelFiles && StoreFileInfo.isDelFile(linkedFile.getPath())) {
      // The file belongs in a del partition.
      // Get the delId (start/end row keys) from the file.
      Reader reader = HFile.createReader(fs, linkedFile.getPath(), CacheConfig.DISABLED, conf);
      try {
        delId.setStartKey(reader.getFirstRowKey());
        delId.setEndKey(reader.getLastRowKey());
      } finally {
        reader.close();
      }
      CompactionDelPartition delPartition = delFilesToCompact.get(delId);
      if (delPartition == null) {
        CompactionDelPartitionId newDelId = new CompactionDelPartitionId(delId.getStartKey(), delId.getEndKey());
        delPartition = new CompactionDelPartition(newDelId);
        delFilesToCompact.put(newDelId, delPartition);
      }
      delPartition.addDelFile(file);
      totalDelFiles++;
    } else {
      String fileName = linkedFile.getPath().getName();
      String date = MobFileName.getDateFromName(fileName);
      boolean skipCompaction = MobUtils.fillPartitionId(id, firstDayOfCurrentMonth, firstDayOfCurrentWeek, date, policy, calendar, mergeableSize);
      if (allFiles || (!skipCompaction && (linkedFile.getLen() < id.getThreshold()))) {
        // add all files if allFiles is true, otherwise add only the small files to the merge
        // pool, filtering out files which are not supposed to be compacted under the current policy.
        id.setStartKey(MobFileName.getStartKeyFromName(fileName));
        CompactionPartition compactionPartition = filesToCompact.get(id);
        if (compactionPartition == null) {
          CompactionPartitionId newId = new CompactionPartitionId(id.getStartKey(), id.getDate());
          compactionPartition = new CompactionPartition(newId);
          compactionPartition.addFile(file);
          filesToCompact.put(newId, compactionPartition);
          newId.updateLatestDate(date);
        } else {
          compactionPartition.addFile(file);
          compactionPartition.getPartitionId().updateLatestDate(date);
        }
        if (withDelFiles) {
          // get startKey and endKey from the file and update the partition
          // TODO: is it possible to skip reading most hfiles?
          Reader reader = HFile.createReader(fs, linkedFile.getPath(), CacheConfig.DISABLED, conf);
          try {
            compactionPartition.setStartKey(reader.getFirstRowKey());
            compactionPartition.setEndKey(reader.getLastRowKey());
          } finally {
            reader.close();
          }
        }
        selectedFileCount++;
      }
    }
  }
  /*
   * Merge del partitions so that the resulting key ranges do not overlap.
   */
  for (Map.Entry<CompactionDelPartitionId, CompactionDelPartition> entry : delFilesToCompact.entrySet()) {
    if (allDelPartitions.size() > 0) {
      // check if the current key range overlaps the previous one
      CompactionDelPartition prev = allDelPartitions.get(allDelPartitions.size() - 1);
      if (Bytes.compareTo(prev.getId().getEndKey(), entry.getKey().getStartKey()) >= 0) {
        // merge them together
        prev.getId().setEndKey(entry.getValue().getId().getEndKey());
        prev.addDelFileList(entry.getValue().listDelFiles());
      } else {
        allDelPartitions.add(entry.getValue());
      }
    } else {
      allDelPartitions.add(entry.getValue());
    }
  }
  PartitionedMobCompactionRequest request = new PartitionedMobCompactionRequest(filesToCompact.values(), allDelPartitions);
  if (candidates.size() == (totalDelFiles + selectedFileCount + irrelevantFileCount)) {
    // all the files are selected
    request.setCompactionType(CompactionType.ALL_FILES);
  }
  LOG.info("The compaction type is " + request.getCompactionType() + ", the request has " + totalDelFiles
      + " del files, " + selectedFileCount + " selected files, and " + irrelevantFileCount + " irrelevant files");
  return request;
}
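The merge loop near the end of select() turns the sorted del partitions into a non-overlapping list by folding each range that touches the previous result into it. The sketch below shows the same idea on plain integer ranges; the class and field names are illustrative, and where the HBase code simply replaces the previous end key, this sketch takes the maximum so it stays correct for arbitrary sorted input.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MergeRangesSketch {
  static final class Range {
    final int start;
    int end;
    Range(int start, int end) { this.start = start; this.end = end; }
    @Override public String toString() { return "[" + start + "," + end + "]"; }
  }

  // Assumes the input is already sorted by start key, as the TreeMap guarantees in select().
  static List<Range> merge(List<Range> sorted) {
    List<Range> out = new ArrayList<>();
    for (Range r : sorted) {
      if (!out.isEmpty() && out.get(out.size() - 1).end >= r.start) {
        // Overlap with the previously merged range: extend its end.
        out.get(out.size() - 1).end = Math.max(out.get(out.size() - 1).end, r.end);
      } else {
        out.add(r);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    List<Range> in = Arrays.asList(new Range(1, 4), new Range(3, 7), new Range(9, 12));
    System.out.println(merge(in)); // [[1,7], [9,12]]
  }
}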
Use of org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition in project hbase by apache.
In the class TestPartitionedMobCompactionRequest, the method testCompactedPartition:
@Test
public void testCompactedPartition() {
  CompactionPartitionId partitionId = new CompactionPartitionId("startKey1", "date1");
  CompactionPartition partition = new CompactionPartition(partitionId);
  FileStatus file = new FileStatus(1, false, 1, 1024, 1, new Path("/test"));
  partition.addFile(file);
  Assert.assertEquals(file, partition.listFiles().get(0));
}