use of java.util.PriorityQueue in project LogisticsPipes by RS485.
the class RequestTreeNode method checkCrafting.
private boolean checkCrafting() {
// get all the routers
BitSet routersIndex = ServerRouter.getRoutersInterestedIn(getRequestType());
// get the routing table
List<ExitRoute> validSources = new ArrayList<>();
for (int i = routersIndex.nextSetBit(0); i >= 0; i = routersIndex.nextSetBit(i + 1)) {
IRouter r = SimpleServiceLocator.routerManager.getRouterUnsafe(i, false);
if (!r.isValidCache()) {
//Skip Routers without a valid pipe
continue;
}
List<ExitRoute> e = getRequestType().getRouter().getDistanceTo(r);
if (e != null) {
validSources.addAll(e);
}
}
// distance doesn't matter, because ingredients have to be delivered to the crafter, and we can't tell how long that will take.
workWeightedSorter wSorter = new workWeightedSorter(0);
Collections.sort(validSources, wSorter);
List<Pair<ICraftingTemplate, List<IFilter>>> allCraftersForItem = RequestTreeNode.getCrafters(getRequestType(), validSources);
// if you have a crafter which can make the top treeNode.getStack().getItem()
Iterator<Pair<ICraftingTemplate, List<IFilter>>> iterAllCrafters = allCraftersForItem.iterator();
//a queue to store the crafters, sorted by to-do; we will fill them up from least to most in a balanced way.
PriorityQueue<CraftingSorterNode> craftersSamePriority = new PriorityQueue<>(5);
ArrayList<CraftingSorterNode> craftersToBalance = new ArrayList<>();
//TODO ^ Make this a generic list
boolean done = false;
Pair<ICraftingTemplate, List<IFilter>> lastCrafter = null;
int currentPriority = 0;
outer: while (!done) {
/// First: Create a list of all crafters with the same priority (craftersSamePriority).
if (iterAllCrafters.hasNext()) {
if (lastCrafter == null) {
lastCrafter = iterAllCrafters.next();
}
} else if (lastCrafter == null) {
done = true;
}
int itemsNeeded = getMissingAmount();
if (lastCrafter != null && (craftersSamePriority.isEmpty() || (currentPriority == lastCrafter.getValue1().getPriority()))) {
currentPriority = lastCrafter.getValue1().getPriority();
Pair<ICraftingTemplate, List<IFilter>> crafter = lastCrafter;
lastCrafter = null;
ICraftingTemplate template = crafter.getValue1();
if (isCrafterUsed(template)) {
continue;
}
if (!template.canCraft(getRequestType())) {
// then this crafter is crafting something else
continue;
}
for (IFilter filter : crafter.getValue2()) {
// is this crafter filtered out for some reason?
if (filter.isBlocked() == filter.isFilteredItem(template.getResultItem()) || filter.blockCrafting()) {
continue outer;
}
}
CraftingSorterNode cn = new CraftingSorterNode(crafter, itemsNeeded, root, this);
// if(cn.getWorkSetsAvailableForCrafting()>0)
craftersSamePriority.add(cn);
continue;
}
if (craftersToBalance.isEmpty() && (craftersSamePriority == null || craftersSamePriority.isEmpty())) {
//nothing at this priority was available for crafting
continue;
}
if (craftersSamePriority.size() == 1) {
// then no need to balance.
craftersToBalance.add(craftersSamePriority.poll());
// automatically capped at the real amount of extra work.
craftersToBalance.get(0).addToWorkRequest(itemsNeeded);
} else {
// give each crafter work until the request is filled or its workload equals that of the next-least busy crafter, then pull the next crafter and repeat.
if (!craftersSamePriority.isEmpty()) {
craftersToBalance.add(craftersSamePriority.poll());
}
// while we have crafters that can work and we have work to do.
while (!craftersToBalance.isEmpty() && itemsNeeded > 0) {
// typically pulls 1 at a time, but may pull multiple, if they have the exact same todo.
while (!craftersSamePriority.isEmpty() && craftersSamePriority.peek().currentToDo() <= craftersToBalance.get(0).currentToDo()) {
craftersToBalance.add(craftersSamePriority.poll());
}
// find the most we can add this iteration
int cap;
if (!craftersSamePriority.isEmpty()) {
cap = craftersSamePriority.peek().currentToDo();
} else {
cap = Integer.MAX_VALUE;
}
//split the work between N crafters, up to "cap" (at which point we would be dividing the work between N+1 crafters).
int floor = craftersToBalance.get(0).currentToDo();
cap = Math.min(cap, floor + (itemsNeeded + craftersToBalance.size() - 1) / craftersToBalance.size());
for (CraftingSorterNode crafter : craftersToBalance) {
int request = Math.min(itemsNeeded, cap - floor);
if (request > 0) {
int craftingDone = crafter.addToWorkRequest(request);
// under-crafting is ignored here
itemsNeeded -= craftingDone;
}
}
}
// all craftersToBalance exhausted, or work completed.
}
// end of else more than 1 crafter at this priority
// commit this work set.
Iterator<CraftingSorterNode> iter = craftersToBalance.iterator();
while (iter.hasNext()) {
CraftingSorterNode c = iter.next();
if (c.stacksOfWorkRequested > 0 && !c.addWorkPromisesToTree()) {
// then it ran out of resources
iter.remove();
}
}
itemsNeeded = getMissingAmount();
if (itemsNeeded <= 0) {
// we have everything we need for this crafting request
break outer;
}
// don't clear, because we might have under-requested, and need to consider these again
if (!craftersToBalance.isEmpty()) {
done = false;
//craftersSamePriority.clear(); // we've extracted all we can from these priority crafters, and we still have more to do, back to the top to get the next priority level.
}
}
//LogisticsPipes.log.info("done");
return isDone();
}
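The balancing loop above is easier to see in isolation. Below is a minimal, self-contained sketch of the same pattern; the Crafter class, names, and numbers are hypothetical and not the LogisticsPipes API. Crafters sit in a PriorityQueue ordered by current workload, the least-busy ones are pulled into a balancing list, and each round raises that group either to the workload of the next crafter still in the queue or by an even share of the remaining items.
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

// Hypothetical stand-in for CraftingSorterNode: tracks how much work is already queued.
class Crafter implements Comparable<Crafter> {
    final String name;
    int toDo;

    Crafter(String name, int toDo) {
        this.name = name;
        this.toDo = toDo;
    }

    @Override
    public int compareTo(Crafter o) {
        return Integer.compare(toDo, o.toDo); // least busy first
    }
}

public class BalancedSplit {
    public static void main(String[] args) {
        PriorityQueue<Crafter> byWorkload = new PriorityQueue<>();
        byWorkload.add(new Crafter("A", 0));
        byWorkload.add(new Crafter("B", 4));
        byWorkload.add(new Crafter("C", 10));
        int itemsNeeded = 20;

        List<Crafter> balancing = new ArrayList<>();
        balancing.add(byWorkload.poll());
        while (!balancing.isEmpty() && itemsNeeded > 0) {
            // pull every crafter whose workload already matches the current floor
            while (!byWorkload.isEmpty() && byWorkload.peek().toDo <= balancing.get(0).toDo) {
                balancing.add(byWorkload.poll());
            }
            // raise the group either to the next crafter's workload or by an even share of the rest
            int cap = byWorkload.isEmpty() ? Integer.MAX_VALUE : byWorkload.peek().toDo;
            int floor = balancing.get(0).toDo;
            cap = Math.min(cap, floor + (itemsNeeded + balancing.size() - 1) / balancing.size());
            for (Crafter c : balancing) {
                int request = Math.min(itemsNeeded, cap - floor);
                if (request > 0) {
                    c.toDo += request;
                    itemsNeeded -= request;
                }
            }
        }
        // prints A -> 12, B -> 12, C -> 10 for the sample numbers above
        balancing.forEach(c -> System.out.println(c.name + " -> " + c.toDo));
    }
}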
use of java.util.PriorityQueue in project sessdb by ppdai.
the class Level1Merger method mergeSort.
public static void mergeSort(LevelQueue lq1, LevelQueue lq2, int ways, String dir, short shard) throws IOException, ClassNotFoundException {
boolean hasLevel2MapTable = lq2.size() > 0;
List<AbstractMapTable> tables = new ArrayList<AbstractMapTable>(ways);
lq1.getReadLock().lock();
try {
Iterator<AbstractMapTable> iter = lq1.descendingIterator();
for (int i = 0; i < ways - 1; i++) {
tables.add(iter.next());
}
if (hasLevel2MapTable) {
tables.add(lq2.get(0));
} else {
tables.add(iter.next());
}
} finally {
lq1.getReadLock().unlock();
}
long expectedInsertions = 0;
for (AbstractMapTable table : tables) {
expectedInsertions += table.getAppendedSize();
}
if (expectedInsertions > Integer.MAX_VALUE)
expectedInsertions = Integer.MAX_VALUE;
// target table
AbstractSortedMapTable sortedMapTable = new FCMapTable(dir, shard, SDB.LEVEL2, System.nanoTime(), (int) expectedInsertions);
PriorityQueue<QueueElement> pq = new PriorityQueue<QueueElement>();
// build initial heap
for (AbstractMapTable table : tables) {
QueueElement qe = new QueueElement();
qe.sortedMapTable = table;
qe.size = qe.sortedMapTable.getAppendedSize();
qe.index = 0;
qe.queue = new LinkedList<IMapEntry>();
IMapEntry me = qe.getNextMapEntry();
if (me != null) {
qe.key = me.getKey();
qe.mapEntry = me;
qe.keyHash = me.getKeyHash();
pq.add(qe);
}
}
LinkedList<IMapEntry> targetCacheQueue = new LinkedList<IMapEntry>();
// merge sort
while (pq.size() > 0) {
QueueElement qe1 = pq.poll();
// remove old/stale entries
while (pq.peek() != null && qe1.keyHash == pq.peek().keyHash && BytesUtil.compare(qe1.key, pq.peek().key) == 0) {
QueueElement qe2 = pq.poll();
IMapEntry me = qe2.getNextMapEntry();
if (me != null) {
qe2.key = me.getKey();
qe2.mapEntry = me;
qe2.keyHash = me.getKeyHash();
pq.add(qe2);
}
}
// remove deleted or expired entries in final merge sorting
if (!qe1.mapEntry.isDeleted() && !qe1.mapEntry.isExpired()) {
targetCacheQueue.add(qe1.mapEntry);
}
if (targetCacheQueue.size() >= CACHED_MAP_ENTRIES * DEFAULT_MERGE_WAYS) {
while (targetCacheQueue.size() > 0) {
IMapEntry mapEntry = targetCacheQueue.poll();
byte[] value = mapEntry.getValue();
// disk space optimization
if (mapEntry.isExpired()) {
continue;
}
sortedMapTable.appendNew(mapEntry.getKey(), mapEntry.getKeyHash(), value, mapEntry.getTimeToLive(), mapEntry.getCreatedTime(), mapEntry.isDeleted(), mapEntry.isCompressed());
}
}
IMapEntry me = qe1.getNextMapEntry();
if (me != null) {
qe1.key = me.getKey();
qe1.mapEntry = me;
qe1.keyHash = me.getKeyHash();
pq.add(qe1);
}
}
// remaining cached entries
while (targetCacheQueue.size() > 0) {
IMapEntry mapEntry = targetCacheQueue.poll();
byte[] value = mapEntry.getValue();
// disk space optimization
if (mapEntry.isExpired()) {
continue;
}
sortedMapTable.appendNew(mapEntry.getKey(), mapEntry.getKeyHash(), value, mapEntry.getTimeToLive(), mapEntry.getCreatedTime(), mapEntry.isDeleted(), mapEntry.isCompressed());
}
// persist metadata
sortedMapTable.reMap();
sortedMapTable.saveMetadata();
// switching
lq1.getWriteLock().lock();
lq2.getWriteLock().lock();
try {
for (int i = 0; i < ways - 1; i++) {
lq1.removeLast();
}
if (hasLevel2MapTable) {
lq2.removeLast();
} else {
lq1.removeLast();
}
for (AbstractMapTable table : tables) {
table.markUsable(false);
}
sortedMapTable.markUsable(true);
lq2.addFirst(sortedMapTable);
} finally {
lq2.getWriteLock().unlock();
lq1.getWriteLock().unlock();
}
for (AbstractMapTable table : tables) {
table.close();
table.delete();
}
}
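The method above is a classic k-way merge driven by a PriorityQueue. The following is a minimal sketch of the same pattern on plain int arrays; the Cursor class is illustrative, not the sessdb API. Each sorted source contributes a cursor ordered by its current head element, the smallest head is appended to the output, and the cursor is re-queued once it advances.
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMerge {
    // Cursor over one already-sorted source, ordered by its current head element.
    static class Cursor implements Comparable<Cursor> {
        final int[] data;
        int index;

        Cursor(int[] data) {
            this.data = data;
        }

        int head() {
            return data[index];
        }

        boolean hasNext() {
            return index < data.length;
        }

        @Override
        public int compareTo(Cursor o) {
            return Integer.compare(head(), o.head());
        }
    }

    static List<Integer> merge(int[]... sources) {
        PriorityQueue<Cursor> pq = new PriorityQueue<>();
        for (int[] source : sources) {
            if (source.length > 0) {
                pq.add(new Cursor(source)); // build the initial heap, one cursor per source
            }
        }
        List<Integer> out = new ArrayList<>();
        while (!pq.isEmpty()) {
            Cursor smallest = pq.poll();
            out.add(smallest.head());
            smallest.index++;
            if (smallest.hasNext()) {
                pq.add(smallest); // re-queue with the next element as its new head
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(merge(new int[] { 1, 4, 9 }, new int[] { 2, 3, 8 }, new int[] { 5, 7 }));
        // [1, 2, 3, 4, 5, 7, 8, 9]
    }
}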
use of java.util.PriorityQueue in project sessdb by ppdai.
the class SDB method loadMapTables.
private void loadMapTables() throws IOException, ClassNotFoundException {
File dirFile = new File(dir);
if (!dirFile.exists()) {
dirFile.mkdirs();
}
String[] fileNames = dirFile.list(new FilenameFilter() {
@Override
public boolean accept(File dir, String filename) {
if (filename.endsWith(AbstractMapTable.INDEX_FILE_SUFFIX))
return true;
return false;
}
});
// new DB, setup new active map table
if (fileNames == null || fileNames.length == 0) {
for (short i = 0; i < this.config.getShardNumber(); i++) {
this.activeInMemTables[i] = new HashMapTable(dir, i, LEVEL0, System.nanoTime());
this.activeInMemTables[i].markUsable(true);
// mutable
this.activeInMemTables[i].markImmutable(false);
this.activeInMemTables[i].setCompressionEnabled(this.config.isCompressionEnabled());
}
return;
}
PriorityQueue<AbstractMapTable> pq = new PriorityQueue<AbstractMapTable>();
for (String fileName : fileNames) {
int dotIndex = fileName.lastIndexOf(".");
if (dotIndex > 0) {
fileName = fileName.substring(0, dotIndex);
}
String[] parts = fileName.split("-");
Preconditions.checkArgument(parts != null && parts.length == 3, "on-disk table file names corrupted!");
int level = Integer.parseInt(parts[1]);
if (level == LEVEL0) {
pq.add(new HashMapTable(dir, fileName));
} else if (level == LEVEL1) {
pq.add(new MMFMapTable(dir, fileName));
} else {
pq.add(new FCMapTable(dir, fileName));
}
}
Preconditions.checkArgument(pq.size() > 0, "on-disk table file names corrupted!");
// setup active map table
for (int i = 0; i < this.config.getShardNumber(); i++) {
AbstractMapTable table = pq.poll();
Preconditions.checkArgument(table.getLevel() == 0, "on-disk table file names corrupted, no level 0 map tables");
this.activeInMemTables[table.getShard()] = (HashMapTable) table;
this.activeInMemTables[table.getShard()].markUsable(true);
// mutable
this.activeInMemTables[table.getShard()].markImmutable(false);
this.activeInMemTables[table.getShard()].setCompressionEnabled(this.config.isCompressionEnabled());
}
while (!pq.isEmpty()) {
AbstractMapTable table = pq.poll();
if (table.isUsable()) {
int level = table.getLevel();
LevelQueue lq = levelQueueLists[table.getShard()].get(level);
lq.addLast(table);
} else {
// garbage
table.close();
table.delete();
}
}
}
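Here the PriorityQueue simply orders the on-disk tables so that level-0 tables are polled first for the active in-memory slots. A small sketch of that idea, under the assumption that tables sort by level first and then by creation time; the real ordering lives in AbstractMapTable's compareTo and is not shown above, and TableFile (a Java 16+ record) is a hypothetical stand-in for the metadata parsed from a file name.
import java.util.Comparator;
import java.util.PriorityQueue;

public class TableOrdering {

    // Hypothetical stand-in for the metadata encoded in a table file name like "0-1-1712345".
    record TableFile(int shard, int level, long createdAtNanos) {
    }

    public static void main(String[] args) {
        // Assumed ordering: lower level first, and newer tables before older ones within a level.
        Comparator<TableFile> byLevelThenNewest = Comparator
                .comparingInt(TableFile::level)
                .thenComparing(Comparator.comparingLong(TableFile::createdAtNanos).reversed());
        PriorityQueue<TableFile> pq = new PriorityQueue<>(byLevelThenNewest);

        pq.add(new TableFile(0, 2, 100L));
        pq.add(new TableFile(0, 0, 300L));
        pq.add(new TableFile(1, 0, 200L));
        pq.add(new TableFile(0, 1, 250L));

        // Level-0 tables come out first and would become the active in-memory tables;
        // everything polled after that would be appended to the per-shard level queues.
        while (!pq.isEmpty()) {
            System.out.println(pq.poll());
        }
    }
}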
use of java.util.PriorityQueue in project presto by prestodb.
the class EliminateCrossJoins method getJoinOrder.
/**
* Given a JoinGraph, determine the order of joins between graph nodes
* by traversing the JoinGraph. Any graph traversal algorithm could be used
* here (like BFS or DFS), but we use a PriorityQueue to preserve the
* original join order as much as possible. The PriorityQueue returns the
* next nodes to join in order of their occurrence in the original plan.
*/
public static List<Integer> getJoinOrder(JoinGraph graph) {
ImmutableList.Builder<PlanNode> joinOrder = ImmutableList.builder();
Map<PlanNodeId, Integer> priorities = new HashMap<>();
for (int i = 0; i < graph.size(); i++) {
priorities.put(graph.getNode(i).getId(), i);
}
PriorityQueue<PlanNode> nodesToVisit = new PriorityQueue<>(graph.size(), (Comparator<PlanNode>) (node1, node2) -> priorities.get(node1.getId()).compareTo(priorities.get(node2.getId())));
Set<PlanNode> visited = new HashSet<>();
nodesToVisit.add(graph.getNode(0));
while (!nodesToVisit.isEmpty()) {
PlanNode node = nodesToVisit.poll();
if (!visited.contains(node)) {
visited.add(node);
joinOrder.add(node);
for (JoinGraph.Edge edge : graph.getEdges(node)) {
nodesToVisit.add(edge.getTargetNode());
}
}
if (nodesToVisit.isEmpty() && visited.size() < graph.size()) {
// disconnected graph, find new starting point
Optional<PlanNode> firstNotVisitedNode = graph.getNodes().stream().filter(graphNode -> !visited.contains(graphNode)).findFirst();
if (firstNotVisitedNode.isPresent()) {
nodesToVisit.add(firstNotVisitedNode.get());
}
}
}
checkState(visited.size() == graph.size());
return joinOrder.build().stream().map(node -> priorities.get(node.getId())).collect(toImmutableList());
}
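The same traversal can be shown on a plain adjacency list; the graph and node indices below are made up. A PriorityQueue keyed by each node's original position makes the walk prefer earlier nodes whenever several are reachable, and the loop restarts from the first unvisited node when the graph is disconnected.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;

public class PriorityTraversal {
    public static List<Integer> traversalOrder(Map<Integer, List<Integer>> adjacency, int nodeCount) {
        // lower index = earlier in the original plan = higher priority
        PriorityQueue<Integer> toVisit = new PriorityQueue<>(Comparator.naturalOrder());
        Set<Integer> visited = new HashSet<>();
        List<Integer> order = new ArrayList<>();
        toVisit.add(0);
        while (!toVisit.isEmpty()) {
            int node = toVisit.poll();
            if (visited.add(node)) {
                order.add(node);
                toVisit.addAll(adjacency.getOrDefault(node, List.of()));
            }
            if (toVisit.isEmpty() && visited.size() < nodeCount) {
                // disconnected graph: restart from the first node not seen yet
                for (int i = 0; i < nodeCount; i++) {
                    if (!visited.contains(i)) {
                        toVisit.add(i);
                        break;
                    }
                }
            }
        }
        return order;
    }

    public static void main(String[] args) {
        // 0-2 and 1-2 are joined; 3 and 4 form a separate component
        Map<Integer, List<Integer>> graph = Map.of(
                0, List.of(2),
                2, List.of(0, 1),
                1, List.of(2),
                3, List.of(4),
                4, List.of(3));
        System.out.println(traversalOrder(graph, 5)); // [0, 2, 1, 3, 4]
    }
}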
use of java.util.PriorityQueue in project presto by prestodb.
the class ShardCompactor method compactSorted.
public List<ShardInfo> compactSorted(long transactionId, OptionalInt bucketNumber, Set<UUID> uuids, List<ColumnInfo> columns, List<Long> sortColumnIds, List<SortOrder> sortOrders) throws IOException {
checkArgument(sortColumnIds.size() == sortOrders.size(), "sortColumnIds and sortOrders must be of the same size");
long start = System.nanoTime();
List<Long> columnIds = columns.stream().map(ColumnInfo::getColumnId).collect(toList());
List<Type> columnTypes = columns.stream().map(ColumnInfo::getType).collect(toList());
checkArgument(columnIds.containsAll(sortColumnIds), "sortColumnIds must be a subset of columnIds");
List<Integer> sortIndexes = sortColumnIds.stream().map(columnIds::indexOf).collect(toList());
Queue<SortedRowSource> rowSources = new PriorityQueue<>();
StoragePageSink outputPageSink = storageManager.createStoragePageSink(transactionId, bucketNumber, columnIds, columnTypes, false);
try {
for (UUID uuid : uuids) {
ConnectorPageSource pageSource = storageManager.getPageSource(uuid, bucketNumber, columnIds, columnTypes, TupleDomain.all(), readerAttributes);
SortedRowSource rowSource = new SortedRowSource(pageSource, columnTypes, sortIndexes, sortOrders);
rowSources.add(rowSource);
}
while (!rowSources.isEmpty()) {
SortedRowSource rowSource = rowSources.poll();
if (!rowSource.hasNext()) {
// rowSource is empty, close it
rowSource.close();
continue;
}
outputPageSink.appendRow(rowSource.next());
if (outputPageSink.isFull()) {
outputPageSink.flush();
}
rowSources.add(rowSource);
}
outputPageSink.flush();
List<ShardInfo> shardInfos = getFutureValue(outputPageSink.commit());
updateStats(uuids.size(), shardInfos.size(), nanosSince(start).toMillis());
return shardInfos;
} catch (IOException | RuntimeException e) {
outputPageSink.rollback();
throw e;
} finally {
rowSources.forEach(SortedRowSource::closeQuietly);
}
}
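The compaction loop is another heap-driven merge, but each SortedRowSource is re-queued after contributing a single row, so the heap always reflects every source's next row. Below is a compact sketch of that pattern with a Comparator-based PriorityQueue; RowSource and the sample data are hypothetical, not the Presto/Raptor API.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Queue;

public class SortedSourceMerge {
    // Hypothetical stand-in for SortedRowSource: an iterator plus its current row.
    static class RowSource {
        final Iterator<String> rows;
        String current;

        RowSource(List<String> sorted) {
            this.rows = sorted.iterator();
        }

        boolean advance() {
            current = rows.hasNext() ? rows.next() : null;
            return current != null;
        }
    }

    public static void main(String[] args) {
        // ordered by each source's current row, mirroring what SortedRowSource's compareTo would do
        Queue<RowSource> sources = new PriorityQueue<RowSource>(Comparator.comparing((RowSource s) -> s.current));
        for (List<String> shard : List.of(
                List.of("apple", "melon"),
                List.of("banana", "cherry", "pear"))) {
            RowSource source = new RowSource(shard);
            if (source.advance()) {
                sources.add(source);
            }
        }
        List<String> merged = new ArrayList<>();
        while (!sources.isEmpty()) {
            RowSource next = sources.poll(); // smallest current row across all sources
            merged.add(next.current);        // append to the output "page sink"
            if (next.advance()) {
                sources.add(next);           // re-queue only if the source still has rows
            }
        }
        System.out.println(merged); // [apple, banana, cherry, melon, pear]
    }
}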