Use of org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint in project drill by apache.
From class MockGroupScanPOP, method applyAssignments:
  @SuppressWarnings("unchecked")
  @Override
  public void applyAssignments(List<DrillbitEndpoint> endpoints) {
    Preconditions.checkArgument(endpoints.size() <= getReadEntries().size());
    mappings = new LinkedList[endpoints.size()];
    int i = 0;
    for (MockScanEntry e : this.getReadEntries()) {
      // Wrap the index around once every endpoint has received an entry,
      // giving a simple round-robin distribution of read entries.
      if (i == endpoints.size()) {
        i -= endpoints.size();
      }
      LinkedList<MockScanEntry> entries = mappings[i];
      if (entries == null) {
        entries = new LinkedList<MockScanEntry>();
        mappings[i] = entries;
      }
      entries.add(e);
      i++;
    }
  }
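The wrap-around index above is a plain round-robin distribution: each read entry goes to the next endpoint in turn, and the counter resets once it reaches the endpoint count. Below is a minimal standalone sketch of the same pattern, using plain strings instead of Drill's MockScanEntry and DrillbitEndpoint types; the class and method names are illustrative only.

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  public class RoundRobinSketch {
    // Distribute 'entries' across 'slotCount' buckets in round-robin order,
    // mirroring the wrap-around index used in applyAssignments above.
    static List<List<String>> assign(List<String> entries, int slotCount) {
      List<List<String>> mappings = new ArrayList<>();
      for (int s = 0; s < slotCount; s++) {
        mappings.add(new ArrayList<>());
      }
      int i = 0;
      for (String entry : entries) {
        if (i == slotCount) {
          i = 0; // wrap around, same effect as "i -= endpoints.size()"
        }
        mappings.get(i).add(entry);
        i++;
      }
      return mappings;
    }

    public static void main(String[] args) {
      // Five entries over two endpoints -> [e1, e3, e5] and [e2, e4].
      System.out.println(assign(Arrays.asList("e1", "e2", "e3", "e4", "e5"), 2));
    }
  }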
Use of org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint in project drill by apache.
From class DrillRoot, method getClusterInfoJSON:
  @GET
  @Path("/cluster.json")
  @Produces(MediaType.APPLICATION_JSON)
  public ClusterInfo getClusterInfoJSON() {
    final Collection<DrillbitInfo> drillbits = Sets.newTreeSet();
    final Collection<String> mismatchedVersions = Sets.newTreeSet();
    final DrillbitEndpoint currentDrillbit = work.getContext().getEndpoint();
    final String currentVersion = currentDrillbit.getVersion();
    final DrillConfig config = work.getContext().getConfig();
    final boolean userEncryptionEnabled = config.getBoolean(ExecConstants.USER_ENCRYPTION_SASL_ENABLED);
    final boolean bitEncryptionEnabled = config.getBoolean(ExecConstants.BIT_ENCRYPTION_SASL_ENABLED);
    for (DrillbitEndpoint endpoint : work.getContext().getBits()) {
      final DrillbitInfo drillbit = new DrillbitInfo(endpoint, currentDrillbit.equals(endpoint), currentVersion.equals(endpoint.getVersion()));
      if (!drillbit.isVersionMatch()) {
        mismatchedVersions.add(drillbit.getVersion());
      }
      drillbits.add(drillbit);
    }
    return new ClusterInfo(drillbits, currentVersion, mismatchedVersions, userEncryptionEnabled, bitEncryptionEnabled);
  }
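This resource returns the JSON-serialized ClusterInfo: the known drillbits, the current version, any mismatched versions, and the two encryption flags. Assuming the Drill web UI is reachable on its default port 8047 (the host and port below are assumptions, not taken from the snippet above), the endpoint can be exercised with a plain HTTP client, as in this sketch.

  import java.net.URI;
  import java.net.http.HttpClient;
  import java.net.http.HttpRequest;
  import java.net.http.HttpResponse;

  public class ClusterInfoClient {
    public static void main(String[] args) throws Exception {
      // Assumed host/port; adjust to wherever your Drill web UI is listening.
      HttpRequest request = HttpRequest.newBuilder()
          .uri(URI.create("http://localhost:8047/cluster.json"))
          .GET()
          .build();
      HttpResponse<String> response = HttpClient.newHttpClient()
          .send(request, HttpResponse.BodyHandlers.ofString());
      // Prints the serialized ClusterInfo payload.
      System.out.println(response.body());
    }
  }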
Use of org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint in project drill by apache.
From class MongoGroupScan, method applyAssignments:
  @Override
  public void applyAssignments(List<DrillbitEndpoint> endpoints) throws PhysicalOperatorSetupException {
    logger.debug("Incoming endpoints :" + endpoints);
    watch.reset();
    watch.start();
    final int numSlots = endpoints.size();
    int totalAssignmentsTobeDone = chunksMapping.size();
    Preconditions.checkArgument(numSlots <= totalAssignmentsTobeDone, String.format("Incoming endpoints %d is greater than number of chunks %d", numSlots, totalAssignmentsTobeDone));
    final int minPerEndpointSlot = (int) Math.floor((double) totalAssignmentsTobeDone / numSlots);
    final int maxPerEndpointSlot = (int) Math.ceil((double) totalAssignmentsTobeDone / numSlots);
    endpointFragmentMapping = Maps.newHashMapWithExpectedSize(numSlots);
    // Group the slot indexes of the assigned endpoints by hostname, so chunks can first
    // be matched to endpoints running on the same host.
    Map<String, Queue<Integer>> endpointHostIndexListMap = Maps.newHashMap();
    for (int i = 0; i < numSlots; ++i) {
      endpointFragmentMapping.put(i, new ArrayList<MongoSubScanSpec>(maxPerEndpointSlot));
      String hostname = endpoints.get(i).getAddress();
      Queue<Integer> hostIndexQueue = endpointHostIndexListMap.get(hostname);
      if (hostIndexQueue == null) {
        hostIndexQueue = Lists.newLinkedList();
        endpointHostIndexListMap.put(hostname, hostIndexQueue);
      }
      hostIndexQueue.add(i);
    }
    // First pass: assign chunks to endpoints that are local to the chunk's host.
    Set<Entry<String, List<ChunkInfo>>> chunksToAssignSet = Sets.newHashSet(chunksInverseMapping.entrySet());
    for (Iterator<Entry<String, List<ChunkInfo>>> chunksIterator = chunksToAssignSet.iterator(); chunksIterator.hasNext(); ) {
      Entry<String, List<ChunkInfo>> chunkEntry = chunksIterator.next();
      Queue<Integer> slots = endpointHostIndexListMap.get(chunkEntry.getKey());
      if (slots != null) {
        for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
          Integer slotIndex = slots.poll();
          List<MongoSubScanSpec> subScanSpecList = endpointFragmentMapping.get(slotIndex);
          subScanSpecList.add(buildSubScanSpecAndGet(chunkInfo));
          slots.offer(slotIndex);
        }
        chunksIterator.remove();
      }
    }
    // Track under-filled and over-filled slots so the remaining work can be balanced.
    PriorityQueue<List<MongoSubScanSpec>> minHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots, LIST_SIZE_COMPARATOR);
    PriorityQueue<List<MongoSubScanSpec>> maxHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots, LIST_SIZE_COMPARATOR_REV);
    for (List<MongoSubScanSpec> listOfScan : endpointFragmentMapping.values()) {
      if (listOfScan.size() < minPerEndpointSlot) {
        minHeap.offer(listOfScan);
      } else if (listOfScan.size() > minPerEndpointSlot) {
        maxHeap.offer(listOfScan);
      }
    }
    // Second pass: give any chunks without a local endpoint to the least-loaded slots.
    if (chunksToAssignSet.size() > 0) {
      for (Entry<String, List<ChunkInfo>> chunkEntry : chunksToAssignSet) {
        for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
          List<MongoSubScanSpec> smallestList = minHeap.poll();
          smallestList.add(buildSubScanSpecAndGet(chunkInfo));
          minHeap.offer(smallestList);
        }
      }
    }
    // Rebalance: move work from the fullest slots to slots still below the minimum.
    while (minHeap.peek() != null && minHeap.peek().size() < minPerEndpointSlot) {
      List<MongoSubScanSpec> smallestList = minHeap.poll();
      List<MongoSubScanSpec> largestList = maxHeap.poll();
      smallestList.add(largestList.remove(largestList.size() - 1));
      if (largestList.size() > minPerEndpointSlot) {
        maxHeap.offer(largestList);
      }
      if (smallestList.size() < minPerEndpointSlot) {
        minHeap.offer(smallestList);
      }
    }
    logger.debug("Built assignment map in {} µs.\nEndpoints: {}.\nAssignment Map: {}", watch.elapsed(TimeUnit.NANOSECONDS) / 1000, endpoints, endpointFragmentMapping.toString());
  }
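The final while-loop is a small load-balancing step: as long as some slot holds fewer than minPerEndpointSlot entries, it steals one item from the currently fullest slot. A self-contained sketch of that rebalancing idea over plain integer lists follows; all names here are illustrative and none of Drill's types are used.

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.Comparator;
  import java.util.List;
  import java.util.PriorityQueue;

  public class RebalanceSketch {
    public static void main(String[] args) {
      // Per-slot assignment lists; 8 items over 4 slots -> at least 2 per slot.
      List<List<Integer>> slots = new ArrayList<>(Arrays.asList(
          new ArrayList<>(Arrays.asList(1, 2, 3, 4)),
          new ArrayList<>(Arrays.asList(5, 6, 7)),
          new ArrayList<>(Arrays.asList(8)),
          new ArrayList<>()));
      int minPerSlot = 2;

      // Under-filled slots ordered smallest-first, over-filled slots largest-first.
      PriorityQueue<List<Integer>> minHeap =
          new PriorityQueue<>(Comparator.<List<Integer>>comparingInt(List::size));
      PriorityQueue<List<Integer>> maxHeap =
          new PriorityQueue<>(Comparator.<List<Integer>>comparingInt(List::size).reversed());
      for (List<Integer> slot : slots) {
        if (slot.size() < minPerSlot) {
          minHeap.offer(slot);
        } else if (slot.size() > minPerSlot) {
          maxHeap.offer(slot);
        }
      }

      // Move one item at a time from the fullest slot to the emptiest one.
      while (minHeap.peek() != null && minHeap.peek().size() < minPerSlot) {
        List<Integer> smallest = minHeap.poll();
        List<Integer> largest = maxHeap.poll();
        smallest.add(largest.remove(largest.size() - 1));
        if (largest.size() > minPerSlot) {
          maxHeap.offer(largest);
        }
        if (smallest.size() < minPerSlot) {
          minHeap.offer(smallest);
        }
      }
      System.out.println(slots); // every slot now holds at least 2 items
    }
  }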
Use of org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint in project drill by apache.
From class SoftAffinityFragmentParallelizer, method parallelizeFragment:
  @Override
  public void parallelizeFragment(final Wrapper fragmentWrapper, final ParallelizationParameters parameters, final Collection<DrillbitEndpoint> activeEndpoints) throws PhysicalOperatorSetupException {
    final Fragment fragment = fragmentWrapper.getNode();
    // Find the parallelization width of the fragment.
    final Stats stats = fragmentWrapper.getStats();
    final ParallelizationInfo parallelizationInfo = stats.getParallelizationInfo();
    // 1. Find the parallelization based on cost. Use the max cost of all operators in this fragment; this is
    // consistent with the calculation that ExcessiveExchangeRemover uses.
    int width = (int) Math.ceil(stats.getMaxCost() / parameters.getSliceTarget());
    // 2. Cap the parallelization width by the fragment-level width limit and the system-level per-query width limit.
    width = Math.min(width, Math.min(parallelizationInfo.getMaxWidth(), parameters.getMaxGlobalWidth()));
    // 3. Cap the parallelization width by the system-level per-node width limit.
    width = Math.min(width, parameters.getMaxWidthPerNode() * activeEndpoints.size());
    // 4. Make sure width is at least the min width enforced by operators.
    width = Math.max(parallelizationInfo.getMinWidth(), width);
    // 5. Make sure width is at most the max width enforced by operators.
    width = Math.min(parallelizationInfo.getMaxWidth(), width);
    // 6. Finally, make sure the width is at least one.
    width = Math.max(1, width);
    fragmentWrapper.setWidth(width);
    final List<DrillbitEndpoint> assignedEndpoints = findEndpoints(activeEndpoints, parallelizationInfo.getEndpointAffinityMap(), fragmentWrapper.getWidth(), parameters);
    fragmentWrapper.assignEndpoints(assignedEndpoints);
  }
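The width computation is just a chain of ceiling, min, and max operations. Below is a standalone sketch of the same arithmetic, with illustrative parameter values standing in for Drill's ParallelizationParameters and ParallelizationInfo; none of the names or numbers come from Drill itself.

  public class WidthSketch {
    // Mirrors the capping sequence above; all parameters are illustrative.
    static int computeWidth(double maxCost, long sliceTarget, int operatorMinWidth, int operatorMaxWidth,
                            int maxGlobalWidth, int maxWidthPerNode, int activeEndpointCount) {
      int width = (int) Math.ceil(maxCost / sliceTarget);                   // 1. cost-based width
      width = Math.min(width, Math.min(operatorMaxWidth, maxGlobalWidth));  // 2. fragment + per-query caps
      width = Math.min(width, maxWidthPerNode * activeEndpointCount);       // 3. per-node cap
      width = Math.max(operatorMinWidth, width);                            // 4. operator minimum
      width = Math.min(operatorMaxWidth, width);                            // 5. operator maximum
      return Math.max(1, width);                                            // 6. at least one fragment
    }

    public static void main(String[] args) {
      // Cost 1,000,000 with a slice target of 100,000 asks for 10 fragments,
      // but 3 nodes at 2 fragments per node cap the width at 6.
      System.out.println(computeWidth(1_000_000, 100_000, 1, Integer.MAX_VALUE, 100, 2, 3));
    }
  }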
Use of org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint in project drill by apache.
From class StatsCollector, method visitReceivingExchange:
  @Override
  public Void visitReceivingExchange(Exchange exchange, Wrapper wrapper) throws RuntimeException {
    // Handle the receiving side of the Exchange.
    final List<ExchangeFragmentPair> receivingExchangePairs = wrapper.getNode().getReceivingExchangePairs();
    // List of the endpoints where the fragments that send data to this fragment are running.
    final List<DrillbitEndpoint> sendingEndpoints = Lists.newArrayList();
    for (ExchangeFragmentPair pair : receivingExchangePairs) {
      if (pair.getExchange() == exchange) {
        Wrapper sendingFragment = planningSet.get(pair.getNode());
        if (sendingFragment.isEndpointsAssignmentDone()) {
          sendingEndpoints.addAll(sendingFragment.getAssignedEndpoints());
        }
      }
    }
    wrapper.getStats().addParallelizationInfo(exchange.getReceiverParallelizationInfo(sendingEndpoints));
    // No further traversal, since it would cross the current fragment boundary.
    return null;
  }
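The core step here is collecting the endpoints of every sender fragment that feeds this particular exchange and whose endpoint assignment has already been done. A compact sketch of that filter-and-collect step over a simplified pair model follows; the record types and sample addresses are illustrative, not Drill's classes.

  import java.util.ArrayList;
  import java.util.List;

  public class SenderEndpointSketch {
    // Simplified stand-ins for Drill's Exchange, ExchangeFragmentPair, and assigned endpoints.
    record Exchange(String id) {}
    record FragmentPair(Exchange exchange, List<String> assignedEndpoints, boolean assignmentDone) {}

    static List<String> sendingEndpoints(Exchange target, List<FragmentPair> pairs) {
      List<String> endpoints = new ArrayList<>();
      for (FragmentPair pair : pairs) {
        // Only pairs for this exchange, and only fragments whose assignment is complete.
        if (pair.exchange() == target && pair.assignmentDone()) {
          endpoints.addAll(pair.assignedEndpoints());
        }
      }
      return endpoints;
    }

    public static void main(String[] args) {
      Exchange ex = new Exchange("hash-exchange");
      List<FragmentPair> pairs = List.of(
          new FragmentPair(ex, List.of("drillbit-1:31010", "drillbit-2:31010"), true),
          new FragmentPair(new Exchange("other"), List.of("drillbit-3:31010"), true));
      System.out.println(sendingEndpoints(ex, pairs)); // [drillbit-1:31010, drillbit-2:31010]
    }
  }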