Use of com.facebook.presto.spi.HostAddress in project presto by prestodb.
In class CassandraSplitManager, method getSplitsByTokenRange:
private List<ConnectorSplit> getSplitsByTokenRange(CassandraTable table, String partitionId)
{
    String schema = table.getTableHandle().getSchemaName();
    String tableName = table.getTableHandle().getTableName();
    String tokenExpression = table.getTokenExpression();

    ImmutableList.Builder<ConnectorSplit> builder = ImmutableList.builder();
    List<CassandraTokenSplitManager.TokenSplit> tokenSplits = tokenSplitMgr.getSplits(schema, tableName);
    for (CassandraTokenSplitManager.TokenSplit tokenSplit : tokenSplits) {
        String condition = buildTokenCondition(tokenExpression, tokenSplit.getStartToken(), tokenSplit.getEndToken());
        List<HostAddress> addresses = new HostAddressFactory().AddressNamesToHostAddressList(tokenSplit.getHosts());
        CassandraSplit split = new CassandraSplit(connectorId, schema, tableName, partitionId, condition, addresses);
        builder.add(split);
    }
    return builder.build();
}
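The split's addresses come from HostAddressFactory, which maps Cassandra host names onto Presto's HostAddress type. Below is a minimal sketch of that kind of conversion built on HostAddress.fromString; the class name, helper method, and sample addresses are invented for illustration and are not the connector's actual implementation.

import com.facebook.presto.spi.HostAddress;
import com.google.common.collect.ImmutableList;
import java.util.Collection;
import java.util.List;

public class HostAddressConversionSketch
{
    // Hypothetical helper: converts bare host names (no explicit port) into HostAddress values.
    static List<HostAddress> toHostAddresses(Collection<String> hostNames)
    {
        ImmutableList.Builder<HostAddress> builder = ImmutableList.builder();
        for (String hostName : hostNames) {
            // HostAddress.fromString accepts either "host" or "host:port".
            builder.add(HostAddress.fromString(hostName));
        }
        return builder.build();
    }

    public static void main(String[] args)
    {
        System.out.println(toHostAddresses(ImmutableList.of("10.0.0.1", "10.0.0.2"))); // [10.0.0.1, 10.0.0.2]
    }
}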
Use of com.facebook.presto.spi.HostAddress in project presto by prestodb.
In class TestHostAddressFactory, method testToHostAddressList:
@Test
public void testToHostAddressList()
        throws Exception
{
    Set<Host> hosts = ImmutableSet.of(
            new TestHost(new InetSocketAddress(InetAddress.getByAddress(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), 3000)),
            new TestHost(new InetSocketAddress(InetAddress.getByAddress(new byte[] {1, 2, 3, 4}), 3000)));

    HostAddressFactory hostAddressFactory = new HostAddressFactory();
    List<HostAddress> list = hostAddressFactory.toHostAddressList(hosts);
    assertEquals(list.toString(), "[[102:304:506:708:90a:b0c:d0e:f10], 1.2.3.4]");
}
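The bracketed first entry reflects how HostAddress renders IPv6 literals, while IPv4 addresses and plain host names print bare. A small sketch of that rendering, assuming HostAddress follows the Guava HostAndPort conventions it is modeled on; the printed forms are what the test above expects, not independently verified output.

import com.facebook.presto.spi.HostAddress;

public class HostAddressRenderingSketch
{
    public static void main(String[] args)
    {
        // IPv6 literals are wrapped in brackets; IPv4 addresses are left bare.
        System.out.println(HostAddress.fromString("102:304:506:708:90a:b0c:d0e:f10")); // [102:304:506:708:90a:b0c:d0e:f10]
        System.out.println(HostAddress.fromString("1.2.3.4")); // 1.2.3.4
    }
}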
Use of com.facebook.presto.spi.HostAddress in project presto by prestodb.
In class BackgroundHiveSplitLoader, method createHiveSplitIterator:
private Iterator<HiveSplit> createHiveSplitIterator(String partitionName, String path, BlockLocation[] blockLocations, long start, long length, Properties schema, List<HivePartitionKey> partitionKeys, boolean splittable, ConnectorSession session, OptionalInt bucketNumber, TupleDomain<HiveColumnHandle> effectivePredicate, Map<Integer, HiveType> columnCoercions)
        throws IOException
{
    boolean forceLocalScheduling = HiveSessionProperties.isForceLocalScheduling(session);

    if (splittable) {
        PeekingIterator<BlockLocation> blockLocationIterator = Iterators.peekingIterator(Arrays.stream(blockLocations).iterator());

        return new AbstractIterator<HiveSplit>()
        {
            private long chunkOffset = 0;

            @Override
            protected HiveSplit computeNext()
            {
                if (!blockLocationIterator.hasNext()) {
                    return endOfData();
                }
                BlockLocation blockLocation = blockLocationIterator.peek();

                List<HostAddress> addresses;
                try {
                    addresses = toHostAddress(blockLocation.getHosts());
                }
                catch (IOException e) {
                    throw Throwables.propagate(e);
                }

                long targetChunkSize;
                if (remainingInitialSplits.decrementAndGet() >= 0) {
                    targetChunkSize = maxInitialSplitSize.toBytes();
                }
                else {
                    long maxBytes = maxSplitSize.toBytes();
                    int chunks = toIntExact((long) Math.ceil((blockLocation.getLength() - chunkOffset) * 1.0 / maxBytes));
                    targetChunkSize = (long) Math.ceil((blockLocation.getLength() - chunkOffset) * 1.0 / chunks);
                }

                // adjust the actual chunk size to account for the overrun when chunks are slightly bigger than necessary (see above)
                long chunkLength = Math.min(targetChunkSize, blockLocation.getLength() - chunkOffset);

                HiveSplit result = new HiveSplit(connectorId, table.getDatabaseName(), table.getTableName(), partitionName, path, blockLocation.getOffset() + chunkOffset, chunkLength, schema, partitionKeys, addresses, bucketNumber, forceLocalScheduling && hasRealAddress(addresses), effectivePredicate, columnCoercions);

                chunkOffset += chunkLength;
                if (chunkOffset >= blockLocation.getLength()) {
                    checkState(chunkOffset == blockLocation.getLength(), "Error splitting blocks");
                    blockLocationIterator.next();
                    chunkOffset = 0;
                }
                return result;
            }
        };
    }
    else {
        // not splittable, use the hosts from the first block if it exists
        List<HostAddress> addresses = ImmutableList.of();
        if (blockLocations.length > 0) {
            addresses = toHostAddress(blockLocations[0].getHosts());
        }
        return Iterators.singletonIterator(new HiveSplit(connectorId, table.getDatabaseName(), table.getTableName(), partitionName, path, start, length, schema, partitionKeys, addresses, bucketNumber, forceLocalScheduling && hasRealAddress(addresses), effectivePredicate, columnCoercions));
    }
}
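One detail worth calling out is the chunk-size arithmetic: instead of cutting maxSplitSize-sized chunks and leaving a small remainder, the loader divides the remaining bytes of the block into equally sized chunks. A standalone sketch of that calculation with invented byte counts:

public class ChunkSizeSketch
{
    public static void main(String[] args)
    {
        // Illustrative numbers only: 1300 MB left in the block, 512 MB max split size.
        long remaining = 1300L * 1024 * 1024;
        long maxBytes = 512L * 1024 * 1024;

        // ceil(1300 / 512) = 3 chunks of about 433 MB each, instead of 512 MB + 512 MB + 276 MB.
        int chunks = Math.toIntExact((long) Math.ceil(remaining * 1.0 / maxBytes));
        long targetChunkSize = (long) Math.ceil(remaining * 1.0 / chunks);
        System.out.println(chunks + " chunks of about " + targetChunkSize / (1024 * 1024) + " MB each");
    }
}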
Use of com.facebook.presto.spi.HostAddress in project presto by prestodb.
In class TopologyAwareNodeSelector, method computeAssignments:
@Override
public SplitPlacementResult computeAssignments(Set<Split> splits, List<RemoteTask> existingTasks)
{
    NodeMap nodeMap = this.nodeMap.get().get();
    Multimap<Node, Split> assignment = HashMultimap.create();
    NodeAssignmentStats assignmentStats = new NodeAssignmentStats(nodeTaskMap, nodeMap, existingTasks);

    int[] topologicCounters = new int[topologicalSplitCounters.size()];
    Set<NetworkLocation> filledLocations = new HashSet<>();
    Set<Node> blockedExactNodes = new HashSet<>();
    boolean splitWaitingForAnyNode = false;
    for (Split split : splits) {
        if (!split.isRemotelyAccessible()) {
            List<Node> candidateNodes = selectExactNodes(nodeMap, split.getAddresses(), includeCoordinator);
            if (candidateNodes.isEmpty()) {
                log.debug("No nodes available to schedule %s. Available nodes %s", split, nodeMap.getNodesByHost().keys());
                throw new PrestoException(NO_NODES_AVAILABLE, "No nodes available to run query");
            }
            Node chosenNode = bestNodeSplitCount(candidateNodes.iterator(), minCandidates, maxPendingSplitsPerTask, assignmentStats);
            if (chosenNode != null) {
                assignment.put(chosenNode, split);
                assignmentStats.addAssignedSplit(chosenNode);
            }
            // Exact node set won't matter, if a split is waiting for any node
            else if (!splitWaitingForAnyNode) {
                blockedExactNodes.addAll(candidateNodes);
            }
            continue;
        }

        Node chosenNode = null;
        int depth = networkLocationSegmentNames.size();
        int chosenDepth = 0;
        Set<NetworkLocation> locations = new HashSet<>();
        for (HostAddress host : split.getAddresses()) {
            locations.add(networkLocationCache.get(host));
        }
        if (locations.isEmpty()) {
            // Add the root location
            locations.add(ROOT_LOCATION);
            depth = 0;
        }
        // Try each address at progressively shallower network locations
        for (int i = depth; i >= 0 && chosenNode == null; i--) {
            for (NetworkLocation location : locations) {
                // For example, locations which couldn't be located will be at the "root" location
                if (location.getSegments().size() < i) {
                    continue;
                }
                location = location.subLocation(0, i);
                if (filledLocations.contains(location)) {
                    continue;
                }
                Set<Node> nodes = nodeMap.getWorkersByNetworkPath().get(location);
                chosenNode = bestNodeSplitCount(new ResettableRandomizedIterator<>(nodes), minCandidates, calculateMaxPendingSplits(i, depth), assignmentStats);
                if (chosenNode != null) {
                    chosenDepth = i;
                    break;
                }
                filledLocations.add(location);
            }
        }
        if (chosenNode != null) {
            assignment.put(chosenNode, split);
            assignmentStats.addAssignedSplit(chosenNode);
            topologicCounters[chosenDepth]++;
        }
        else {
            splitWaitingForAnyNode = true;
        }
    }
    for (int i = 0; i < topologicCounters.length; i++) {
        if (topologicCounters[i] > 0) {
            topologicalSplitCounters.get(i).update(topologicCounters[i]);
        }
    }

    ListenableFuture<?> blocked;
    int maxPendingForWildcardNetworkAffinity = calculateMaxPendingSplits(0, networkLocationSegmentNames.size());
    if (splitWaitingForAnyNode) {
        blocked = toWhenHasSplitQueueSpaceFuture(existingTasks, calculateLowWatermark(maxPendingForWildcardNetworkAffinity));
    }
    else {
        blocked = toWhenHasSplitQueueSpaceFuture(blockedExactNodes, existingTasks, calculateLowWatermark(maxPendingForWildcardNetworkAffinity));
    }
    return new SplitPlacementResult(blocked, assignment);
}
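The depth loop walks each split's network locations from the most specific path down to the root. A self-contained sketch of that shallowing order, using plain string segments in place of NetworkLocation and invented segment names:

import com.google.common.collect.ImmutableList;
import java.util.List;

public class TopologyDepthSketch
{
    public static void main(String[] args)
    {
        // Hypothetical network location of a split's preferred host: region / rack / machine.
        List<String> segments = ImmutableList.of("region1", "rack7", "machine42");

        // Try the full path first, then progressively shallower prefixes, ending at the root (empty prefix).
        for (int i = segments.size(); i >= 0; i--) {
            System.out.println("depth " + i + " -> " + segments.subList(0, i));
        }
    }
}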
Use of com.facebook.presto.spi.HostAddress in project presto by prestodb.
In class MemorySplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layoutHandle)
{
    MemoryTableLayoutHandle layout = (MemoryTableLayoutHandle) layoutHandle;
    List<HostAddress> hosts = layout.getTable().getHosts();

    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (HostAddress host : hosts) {
        for (int i = 0; i < splitsPerNode; i++) {
            splits.add(new MemorySplit(layout.getTable(), i, splitsPerNode, ImmutableList.of(host)));
        }
    }
    return new FixedSplitSource(splits.build());
}
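Every host that holds the table contributes splitsPerNode splits, so the source yields hosts.size() * splitsPerNode splits in total. A small sketch with invented worker addresses and an assumed splitsPerNode of 4:

import com.facebook.presto.spi.HostAddress;
import com.google.common.collect.ImmutableList;
import java.util.List;

public class MemorySplitCountSketch
{
    public static void main(String[] args)
    {
        // Invented worker addresses and split count, purely for illustration.
        List<HostAddress> hosts = ImmutableList.of(
                HostAddress.fromParts("worker-1.example.com", 8080),
                HostAddress.fromParts("worker-2.example.com", 8080));
        int splitsPerNode = 4;

        // One MemorySplit per (host, split index) pair.
        System.out.println("total splits: " + hosts.size() * splitsPerNode); // total splits: 8
    }
}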