Use of org.apache.accumulo.core.spi.compaction.CompactionJob in project accumulo by apache.
Class CompactionService, method submitCompactionJob:
private void submitCompactionJob(CompactionPlan plan, Compactable.Files files,
    Compactable compactable, Consumer<Compactable> completionCallback) {
  // log error if tablet is metadata and compaction is external
  var execIds = plan.getJobs().stream().map(cj -> (CompactionExecutorIdImpl) cj.getExecutor());
  if (compactable.getExtent().isMeta() && execIds.anyMatch(ceid -> ceid.isExternalId())) {
    log.error(
        "Compacting metadata tablets on external compactors is not supported, please change "
            + "config for compaction service ({}) and/or table ASAP. {} is not compacting, "
            + "ignoring plan {}",
        myId, compactable.getExtent(), plan);
    return;
  }

  Set<CompactionJob> jobs = new HashSet<>(plan.getJobs());

  Collection<SubmittedJob> submitted =
      submittedJobs.getOrDefault(compactable.getExtent(), List.of());
  if (!submitted.isEmpty()) {
    submitted.removeIf(sj -> {
      // to avoid race conditions, only read status once and use local var for the two compares
      var status = sj.getStatus();
      return status != Status.QUEUED && status != Status.RUNNING;
    });
  }

  if (reconcile(jobs, submitted)) {
    for (CompactionJob job : jobs) {
      CompactionExecutor executor = executors.get(job.getExecutor());
      var submittedJob = executor.submit(myId, job, compactable, completionCallback);
      // it's important that the collection created in computeIfAbsent supports concurrency
      submittedJobs.computeIfAbsent(compactable.getExtent(), k -> new ConcurrentLinkedQueue<>())
          .add(submittedJob);
    }

    if (!jobs.isEmpty()) {
      log.trace("Submitted compaction plan {} id:{} files:{} plan:{}", compactable.getExtent(),
          myId, files, plan);
    }
  } else {
    log.trace("Did not submit compaction plan {} id:{} files:{} plan:{}", compactable.getExtent(),
        myId, files, plan);
  }
}
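The two in-code comments above point at idioms worth noting: the per-extent collection created inside computeIfAbsent must itself be thread safe, since other threads may add to or prune it concurrently, and a job's status should be read once into a local variable before being compared against multiple states. The following is a minimal standalone sketch of the same pattern; the TrackedJob and JobState types and the JobTracker class are hypothetical stand-ins, not Accumulo classes.

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

public class JobTracker {

  // Hypothetical stand-ins for SubmittedJob and its status enum.
  enum JobState { QUEUED, RUNNING, COMPLETE, FAILED }

  interface TrackedJob {
    JobState getState();
  }

  // The top-level map is concurrent, and so is each per-key queue, because
  // submissions and pruning may happen on different threads.
  private final Map<String,Queue<TrackedJob>> jobsByTablet = new ConcurrentHashMap<>();

  void track(String tabletId, TrackedJob job) {
    // The queue created here must support concurrent add/removeIf; a plain
    // ArrayList would not be safe if another thread prunes it at the same time.
    jobsByTablet.computeIfAbsent(tabletId, k -> new ConcurrentLinkedQueue<>()).add(job);
  }

  void pruneFinished(String tabletId) {
    Queue<TrackedJob> jobs = jobsByTablet.get(tabletId);
    if (jobs == null) {
      return;
    }
    jobs.removeIf(job -> {
      // Read the state once; two separate getState() calls could observe
      // different values if the job transitions between them.
      JobState state = job.getState();
      return state != JobState.QUEUED && state != JobState.RUNNING;
    });
  }
}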