Use of java.util.PriorityQueue in project guava by hceylan.
The class MinMaxPriorityQueueTest, method testCorrectOrdering_randomAccess:
public void testCorrectOrdering_randomAccess() {
  long seed = new Random().nextLong();
  Random random = new Random(seed);
  PriorityQueue<Integer> control = new PriorityQueue<Integer>();
  MinMaxPriorityQueue<Integer> q = MinMaxPriorityQueue.create();
  for (int i = 0; i < 73; i++) {
    // 73 is a childless uncle case.
    Integer element = random.nextInt();
    control.add(element);
    assertTrue(q.add(element));
  }
  assertTrue("State " + Arrays.toString(q.toArray()), q.isIntact());
  for (int i = 0; i < 500000; i++) {
    if (random.nextBoolean()) {
      Integer element = random.nextInt();
      control.add(element);
      q.add(element);
    } else {
      assertEquals("Using seed " + seed, control.poll(), q.pollFirst());
    }
  }
  while (!control.isEmpty()) {
    assertEquals("Using seed " + seed, control.poll(), q.pollFirst());
  }
  assertTrue(q.isEmpty());
}
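The test above uses java.util.PriorityQueue as a trusted oracle: both queues receive the same random operations, and any divergence in poll order signals a bug in MinMaxPriorityQueue. Below is a minimal standalone sketch of the same differential-testing idea, with a second PriorityQueue standing in for the queue under test (the class name and iteration count are illustrative) and the seed printed so a failing run can be reproduced:

import java.util.PriorityQueue;
import java.util.Random;

public class PriorityQueueOracleSketch {
  public static void main(String[] args) {
    long seed = new Random().nextLong();
    Random random = new Random(seed);
    PriorityQueue<Integer> control = new PriorityQueue<>();
    PriorityQueue<Integer> candidate = new PriorityQueue<>(); // stand-in for the queue under test
    for (int i = 0; i < 10_000; i++) {
      if (random.nextBoolean()) {
        int element = random.nextInt();
        control.add(element);
        candidate.add(element);
      } else if (!control.isEmpty()) {
        // both queues must agree on the minimum element at every step
        if (!control.poll().equals(candidate.poll())) {
          throw new AssertionError("Divergence using seed " + seed);
        }
      }
    }
    System.out.println("OK, seed " + seed);
  }
}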
Use of java.util.PriorityQueue in project intellij-community by JetBrains.
The class BreadcrumbsXmlWrapper, method findFirstBreadcrumbedElement:
@Nullable
private static PsiElement findFirstBreadcrumbedElement(final int offset, final VirtualFile file, final Project project, final BreadcrumbsInfoProvider defaultInfoProvider) {
  if (file == null || !file.isValid())
    return null;
  PriorityQueue<PsiElement> leafs = new PriorityQueue<>(3, (o1, o2) -> {
    TextRange range1 = o1.getTextRange();
    if (range1 == null) {
      LOG.error(o1 + " returned null range");
      return 1;
    }
    TextRange range2 = o2.getTextRange();
    if (range2 == null) {
      LOG.error(o2 + " returned null range");
      return -1;
    }
    return range2.getStartOffset() - range1.getStartOffset();
  });
  FileViewProvider viewProvider = findViewProvider(file, project);
  if (viewProvider == null)
    return null;
  for (final Language language : viewProvider.getLanguages()) {
    ContainerUtil.addIfNotNull(leafs, viewProvider.findElementAt(offset, language));
  }
  while (!leafs.isEmpty()) {
    final PsiElement element = leafs.remove();
    if (!element.isValid())
      continue;
    BreadcrumbsInfoProvider provider = findProviderForElement(element, defaultInfoProvider);
    if (provider != null && provider.acceptElement(element)) {
      return element;
    }
    if (!(element instanceof PsiFile)) {
      ContainerUtil.addIfNotNull(leafs, getParent(element, provider));
    }
  }
  return null;
}
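The comparator above orders elements by descending start offset, so the queue surfaces the innermost (latest-starting) leaf before its enclosing elements as the search walks up the PSI tree. A minimal sketch of that ordering, assuming a simple Range record in place of PsiElement and TextRange (both names below are illustrative stand-ins, and the sketch assumes Java 16+ for records):

import java.util.Comparator;
import java.util.PriorityQueue;

public class DescendingStartOffsetSketch {
  record Range(int startOffset, int endOffset) {} // illustrative stand-in for TextRange

  public static void main(String[] args) {
    PriorityQueue<Range> leafs =
        new PriorityQueue<>(Comparator.comparingInt(Range::startOffset).reversed());
    leafs.add(new Range(0, 100)); // whole file
    leafs.add(new Range(40, 60)); // enclosing element
    leafs.add(new Range(45, 50)); // innermost leaf
    while (!leafs.isEmpty()) {
      // innermost first: startOffset 45, then 40, then 0
      System.out.println(leafs.remove());
    }
  }
}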
Use of java.util.PriorityQueue in project asterixdb by apache.
The class JobExecutor, method startRunnableTaskClusters:
private void startRunnableTaskClusters(Set<TaskCluster> tcRoots) throws HyracksException {
    Map<TaskCluster, Runnability> runnabilityMap = new HashMap<>();
    for (TaskCluster tc : tcRoots) {
        assignRunnabilityRank(tc, runnabilityMap);
    }
    PriorityQueue<RankedRunnableTaskCluster> queue = new PriorityQueue<>();
    for (Map.Entry<TaskCluster, Runnability> e : runnabilityMap.entrySet()) {
        TaskCluster tc = e.getKey();
        Runnability runnability = e.getValue();
        if (runnability.getTag() != Runnability.Tag.RUNNABLE) {
            continue;
        }
        int priority = runnability.getPriority();
        if (priority >= 0 && priority < Integer.MAX_VALUE) {
            queue.add(new RankedRunnableTaskCluster(priority, tc));
        }
    }
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine("Ranked TCs: " + queue);
    }
    Map<String, List<TaskAttemptDescriptor>> taskAttemptMap = new HashMap<>();
    for (RankedRunnableTaskCluster rrtc : queue) {
        TaskCluster tc = rrtc.getTaskCluster();
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("Found runnable TC: " + tc);
            List<TaskClusterAttempt> attempts = tc.getAttempts();
            LOGGER.fine("Attempts so far: " + attempts.size());
            for (TaskClusterAttempt tcAttempt : attempts) {
                LOGGER.fine("Status: " + tcAttempt.getStatus());
            }
        }
        assignTaskLocations(tc, taskAttemptMap);
    }
    if (taskAttemptMap.isEmpty()) {
        return;
    }
    startTasks(taskAttemptMap);
}
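One subtlety worth noting in the loop above: iterating a PriorityQueue with for-each traverses the backing heap array, not priority order; only poll() and remove() honor the comparator. A short demonstration of the difference (the exact iteration order depends on the heap layout):

import java.util.PriorityQueue;

public class HeapIterationOrderSketch {
  public static void main(String[] args) {
    PriorityQueue<Integer> queue = new PriorityQueue<>();
    for (int n : new int[] {5, 1, 4, 2, 3}) {
      queue.add(n);
    }
    // toString uses the iterator, so this prints heap order, e.g. [1, 2, 4, 5, 3]
    System.out.println("iteration order: " + queue);
    StringBuilder polled = new StringBuilder("poll order:");
    while (!queue.isEmpty()) {
      polled.append(' ').append(queue.poll()); // 1 2 3 4 5
    }
    System.out.println(polled);
  }
}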
Use of java.util.PriorityQueue in project drill by apache.
The class HBaseGroupScan, method applyAssignments:
/**
 * @param incomingEndpoints the drillbit endpoints available to execute this scan
 */
@Override
public void applyAssignments(List<DrillbitEndpoint> incomingEndpoints) {
    watch.reset();
    watch.start();
    final int numSlots = incomingEndpoints.size();
    Preconditions.checkArgument(numSlots <= regionsToScan.size(),
        String.format("Incoming endpoints %d is greater than number of scan regions %d", numSlots, regionsToScan.size()));
    /*
     * Minimum/maximum number of assignments per slot.
     */
    final int minPerEndpointSlot = (int) Math.floor((double) regionsToScan.size() / numSlots);
    final int maxPerEndpointSlot = (int) Math.ceil((double) regionsToScan.size() / numSlots);
    /*
     * Initialize the (endpoint index => HBaseSubScanSpec list) map.
     */
    endpointFragmentMapping = Maps.newHashMapWithExpectedSize(numSlots);
    /*
     * Another map with endpoint (hostname => corresponding index list) in the 'incomingEndpoints' list.
     */
    Map<String, Queue<Integer>> endpointHostIndexListMap = Maps.newHashMap();
    /*
     * Initialize these two maps.
     */
    for (int i = 0; i < numSlots; ++i) {
        endpointFragmentMapping.put(i, new ArrayList<HBaseSubScanSpec>(maxPerEndpointSlot));
        String hostname = incomingEndpoints.get(i).getAddress();
        Queue<Integer> hostIndexQueue = endpointHostIndexListMap.get(hostname);
        if (hostIndexQueue == null) {
            hostIndexQueue = Lists.newLinkedList();
            endpointHostIndexListMap.put(hostname, hostIndexQueue);
        }
        hostIndexQueue.add(i);
    }
    Set<Entry<HRegionInfo, ServerName>> regionsToAssignSet = Sets.newHashSet(regionsToScan.entrySet());
    /*
     * First, assign regions which are hosted on region servers running on drillbit endpoints.
     */
    for (Iterator<Entry<HRegionInfo, ServerName>> regionsIterator = regionsToAssignSet.iterator(); regionsIterator.hasNext(); ) {
        Entry<HRegionInfo, ServerName> regionEntry = regionsIterator.next();
        /*
         * Test if there is a drillbit endpoint which is also an HBase RegionServer that hosts the current HBase region.
         */
        Queue<Integer> endpointIndexlist = endpointHostIndexListMap.get(regionEntry.getValue().getHostname());
        if (endpointIndexlist != null) {
            Integer slotIndex = endpointIndexlist.poll();
            List<HBaseSubScanSpec> endpointSlotScanList = endpointFragmentMapping.get(slotIndex);
            endpointSlotScanList.add(regionInfoToSubScanSpec(regionEntry.getKey()));
            // return the slot index to the tail of the queue, so later regions on this host are assigned round-robin
            endpointIndexlist.offer(slotIndex);
            // this region has been assigned
            regionsIterator.remove();
        }
    }
    /*
     * Build two priority queues of slots: a min-heap over slots with fewer tasks than 'minPerEndpointSlot'
     * and a max-heap over slots with more.
     */
    PriorityQueue<List<HBaseSubScanSpec>> minHeap = new PriorityQueue<List<HBaseSubScanSpec>>(numSlots, LIST_SIZE_COMPARATOR);
    PriorityQueue<List<HBaseSubScanSpec>> maxHeap = new PriorityQueue<List<HBaseSubScanSpec>>(numSlots, LIST_SIZE_COMPARATOR_REV);
    for (List<HBaseSubScanSpec> listOfScan : endpointFragmentMapping.values()) {
        if (listOfScan.size() < minPerEndpointSlot) {
            minHeap.offer(listOfScan);
        } else if (listOfScan.size() > minPerEndpointSlot) {
            maxHeap.offer(listOfScan);
        }
    }
    /*
     * Now process any regions which remain unassigned, assigning each to the slot with the fewest assignments.
     */
    if (regionsToAssignSet.size() > 0) {
        for (Entry<HRegionInfo, ServerName> regionEntry : regionsToAssignSet) {
            List<HBaseSubScanSpec> smallestList = minHeap.poll();
            smallestList.add(regionInfoToSubScanSpec(regionEntry.getKey()));
            if (smallestList.size() < maxPerEndpointSlot) {
                minHeap.offer(smallestList);
            }
        }
    }
    /*
     * While there are slots with fewer than 'minPerEndpointSlot' units of work, rebalance from those with more.
     */
    while (minHeap.peek() != null && minHeap.peek().size() < minPerEndpointSlot) {
        List<HBaseSubScanSpec> smallestList = minHeap.poll();
        List<HBaseSubScanSpec> largestList = maxHeap.poll();
        smallestList.add(largestList.remove(largestList.size() - 1));
        if (largestList.size() > minPerEndpointSlot) {
            maxHeap.offer(largestList);
        }
        if (smallestList.size() < minPerEndpointSlot) {
            minHeap.offer(smallestList);
        }
    }
    /* no slot should be empty at this point */
    assert (minHeap.peek() == null || minHeap.peek().size() > 0) : String.format(
        "Unable to assign tasks to some endpoints.\nEndpoints: %s.\nAssignment Map: %s.",
        incomingEndpoints, endpointFragmentMapping.toString());
    logger.debug("Built assignment map in {} µs.\nEndpoints: {}.\nAssignment Map: {}",
        watch.elapsed(TimeUnit.NANOSECONDS) / 1000, incomingEndpoints, endpointFragmentMapping.toString());
}
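The two heaps above implement a simple load balancer: the min-heap surfaces the least-loaded slot and the max-heap the most-loaded one, and each iteration moves one unit of work between them. A minimal sketch of that rebalancing step, assuming plain List<String> work lists in place of HBaseSubScanSpec (the names light, heavy, and minPerSlot are illustrative):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class SlotBalancingSketch {
  public static void main(String[] args) {
    Comparator<List<String>> bySize = Comparator.comparingInt(List::size);
    PriorityQueue<List<String>> minHeap = new PriorityQueue<>(bySize);
    PriorityQueue<List<String>> maxHeap = new PriorityQueue<>(bySize.reversed());
    List<String> light = new ArrayList<>(List.of("r1"));
    List<String> heavy = new ArrayList<>(List.of("r2", "r3", "r4", "r5"));
    minHeap.offer(light);
    maxHeap.offer(heavy);
    int minPerSlot = 2;
    while (minHeap.peek() != null && minHeap.peek().size() < minPerSlot) {
      List<String> smallest = minHeap.poll();
      List<String> largest = maxHeap.poll();
      smallest.add(largest.remove(largest.size() - 1)); // move one region over
      if (largest.size() > minPerSlot) {
        maxHeap.offer(largest);
      }
      if (smallest.size() < minPerSlot) {
        minHeap.offer(smallest);
      }
    }
    System.out.println(light + " / " + heavy); // [r1, r5] / [r2, r3, r4]
  }
}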
Use of java.util.PriorityQueue in project lucene-solr by apache.
The class TopTermsRewrite, method rewrite:
@Override
public final Query rewrite(final IndexReader reader, final MultiTermQuery query) throws IOException {
  final int maxSize = Math.min(size, getMaxSize());
  final PriorityQueue<ScoreTerm> stQueue = new PriorityQueue<>();
  collectTerms(reader, query, new TermCollector() {
    private final MaxNonCompetitiveBoostAttribute maxBoostAtt = attributes.addAttribute(MaxNonCompetitiveBoostAttribute.class);
    private final Map<BytesRef, ScoreTerm> visitedTerms = new HashMap<>();
    private TermsEnum termsEnum;
    private BoostAttribute boostAtt;
    private ScoreTerm st;

    @Override
    public void setNextEnum(TermsEnum termsEnum) {
      this.termsEnum = termsEnum;
      assert compareToLastTerm(null);
      // lazy init the initial ScoreTerm because comparator is not known on ctor:
      if (st == null)
        st = new ScoreTerm(new TermContext(topReaderContext));
      boostAtt = termsEnum.attributes().addAttribute(BoostAttribute.class);
    }

    // for assert:
    private BytesRefBuilder lastTerm;

    private boolean compareToLastTerm(BytesRef t) {
      if (lastTerm == null && t != null) {
        lastTerm = new BytesRefBuilder();
        lastTerm.append(t);
      } else if (t == null) {
        lastTerm = null;
      } else {
        assert lastTerm.get().compareTo(t) < 0 : "lastTerm=" + lastTerm + " t=" + t;
        lastTerm.copyBytes(t);
      }
      return true;
    }

    @Override
    public boolean collect(BytesRef bytes) throws IOException {
      final float boost = boostAtt.getBoost();
      // terms in order
      assert compareToLastTerm(bytes);
      // ignore uncompetitive hits
      if (stQueue.size() == maxSize) {
        final ScoreTerm t = stQueue.peek();
        if (boost < t.boost)
          return true;
        if (boost == t.boost && bytes.compareTo(t.bytes.get()) > 0)
          return true;
      }
      ScoreTerm t = visitedTerms.get(bytes);
      final TermState state = termsEnum.termState();
      assert state != null;
      if (t != null) {
        // if the term is already in the PQ, only update docFreq of term in PQ
        assert t.boost == boost : "boost should be equal in all segment TermsEnums";
        t.termState.register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
      } else {
        // add new entry in PQ, we must clone the term, else it may get overwritten!
        st.bytes.copyBytes(bytes);
        st.boost = boost;
        visitedTerms.put(st.bytes.get(), st);
        assert st.termState.docFreq() == 0;
        st.termState.register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
        stQueue.offer(st);
        // possibly drop entries from queue
        if (stQueue.size() > maxSize) {
          st = stQueue.poll();
          visitedTerms.remove(st.bytes.get());
          // reset the termstate!
          st.termState.clear();
        } else {
          st = new ScoreTerm(new TermContext(topReaderContext));
        }
        assert stQueue.size() <= maxSize : "the PQ size must be limited to maxSize";
        // set maxBoostAtt with values to help FuzzyTermsEnum to optimize
        if (stQueue.size() == maxSize) {
          t = stQueue.peek();
          maxBoostAtt.setMaxNonCompetitiveBoost(t.boost);
          maxBoostAtt.setCompetitiveTerm(t.bytes.get());
        }
      }
      return true;
    }
  });
  final B b = getTopLevelBuilder();
  final ScoreTerm[] scoreTerms = stQueue.toArray(new ScoreTerm[stQueue.size()]);
  ArrayUtil.timSort(scoreTerms, scoreTermSortByTermComp);
  for (final ScoreTerm st : scoreTerms) {
    final Term term = new Term(query.field, st.bytes.toBytesRef());
    // We allow negative term scores (fuzzy query does this, for example) while collecting the terms,
    // but truncate such boosts to 0.0f when building the query:
    // add to query
    addClause(b, term, st.termState.docFreq(), Math.max(0.0f, st.boost), st.termState);
  }
  return build(b);
}
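The queue above is the classic bounded top-K pattern: a min-heap of at most maxSize entries whose head is the current worst survivor, used both to reject uncompetitive candidates early (the peek-and-compare check) and to evict the weakest entry when the cap is exceeded (the offer-then-poll step). A minimal sketch, assuming simple (term, boost) pairs in a hypothetical Scored record rather than Lucene's ScoreTerm:

import java.util.PriorityQueue;

public class TopKBoostSketch {
  record Scored(String term, float boost) implements Comparable<Scored> {
    public int compareTo(Scored o) { return Float.compare(boost, o.boost); }
  }

  public static void main(String[] args) {
    final int maxSize = 3;
    PriorityQueue<Scored> queue = new PriorityQueue<>();
    for (Scored s : new Scored[] {
        new Scored("a", 0.4f), new Scored("b", 0.9f),
        new Scored("c", 0.1f), new Scored("d", 0.7f),
        new Scored("e", 0.5f)}) {
      // skip candidates that cannot beat the current worst survivor
      if (queue.size() == maxSize && s.boost() <= queue.peek().boost()) {
        continue;
      }
      queue.offer(s);
      if (queue.size() > maxSize) {
        queue.poll(); // evict the least competitive entry
      }
    }
    System.out.println(queue); // holds b, d and e (in heap order)
  }
}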