Example usage of org.apache.accumulo.core.util.compaction.CompactionExecutorIdImpl in the Apache Accumulo project.
From the class CompactionService, the method submitCompactionJob:
/**
 * Submits the jobs in the given compaction plan to their compaction executors and records each
 * submission in {@code submittedJobs}. The plan is rejected up front when it would send a
 * metadata tablet to an external compactor (unsupported), and nothing is submitted unless the
 * planned jobs can be reconciled with the jobs already queued or running for this tablet.
 */
private void submitCompactionJob(CompactionPlan plan, Compactable.Files files, Compactable compactable, Consumer<Compactable> completionCallback) {
// log error if tablet is metadata and compaction is external
var execIds = plan.getJobs().stream().map(cj -> (CompactionExecutorIdImpl) cj.getExecutor());
if (compactable.getExtent().isMeta() && execIds.anyMatch(ceid -> ceid.isExternalId())) {
log.error("Compacting metadata tablets on external compactors is not supported, please change " + "config for compaction service ({}) and/or table ASAP. {} is not compacting, " + "ignoring plan {}", myId, compactable.getExtent(), plan);
return;
}
// Mutable copy of the planned jobs; NOTE(review): reconcile() below presumably prunes this set
// down to the jobs that still need submitting — confirm against its implementation.
Set<CompactionJob> jobs = new HashSet<>(plan.getJobs());
// getOrDefault returns either the live ConcurrentLinkedQueue stored for this extent (see
// computeIfAbsent below) or the immutable List.of() sentinel when nothing was submitted yet.
Collection<SubmittedJob> submitted = submittedJobs.getOrDefault(compactable.getExtent(), List.of());
// The isEmpty guard also protects the List.of() sentinel: unmodifiable lists throw
// UnsupportedOperationException from removeIf even when no element matches.
if (!submitted.isEmpty()) {
// Purge finished jobs; this mutates the shared queue held in the submittedJobs map.
submitted.removeIf(sj -> {
// to avoid race conditions, only read status once and use local var for the two compares
var status = sj.getStatus();
return status != Status.QUEUED && status != Status.RUNNING;
});
}
if (reconcile(jobs, submitted)) {
for (CompactionJob job : jobs) {
CompactionExecutor executor = executors.get(job.getExecutor());
var submittedJob = executor.submit(myId, job, compactable, completionCallback);
// its important that the collection created in computeIfAbsent supports concurrency
submittedJobs.computeIfAbsent(compactable.getExtent(), k -> new ConcurrentLinkedQueue<>()).add(submittedJob);
}
if (!jobs.isEmpty()) {
log.trace("Submitted compaction plan {} id:{} files:{} plan:{}", compactable.getExtent(), myId, files, plan);
}
} else {
log.trace("Did not submit compaction plan {} id:{} files:{} plan:{}", compactable.getExtent(), myId, files, plan);
}
}
Another usage of org.apache.accumulo.core.util.compaction.CompactionExecutorIdImpl in the Apache Accumulo project.
From the class ExternalCompactionExecutor, the method summarize:
/**
 * Builds one queue summary per distinct priority currently queued on this executor's external
 * queue. If more than 100 distinct priorities exist, only the 100 highest are reported so the
 * coordinator is not driven out of memory.
 */
public Stream<TCompactionQueueSummary> summarize() {
  // Gather the distinct priorities of every queued job.
  Set<Short> priorities = new HashSet<>();
  queuedJob.forEach(queued -> priorities.add(queued.getJob().getPriority()));

  // Cap the report at the 100 highest priorities when there are too many.
  Stream<Short> selected = priorities.size() > 100
      ? priorities.stream().sorted(Comparator.reverseOrder()).limit(100)
      : priorities.stream();

  String externalQueue = ((CompactionExecutorIdImpl) ceid).getExternalName();
  return selected.map(priority -> new TCompactionQueueSummary(externalQueue, priority));
}
Aggregations