Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb.
Class AbstractTestHiveClientS3, method testGetRecordsS3:
@Test
public void testGetRecordsS3()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        ConnectorTableHandle table = getTableHandle(metadata, tableS3);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, table).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);

        List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, table, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
        HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
        assertEquals(layoutHandle.getPartitions().get().size(), 1);

        ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, layoutHandle);

        long sum = 0;
        for (ConnectorSplit split : getAllSplits(splitSource)) {
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, columnHandles)) {
                MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
                for (MaterializedRow row : result) {
                    sum += (Long) row.getField(columnIndex.get("t_bigint"));
                }
            }
        }
        assertEquals(sum, 78300);
    }
}
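The getAllSplits helper used in the loop above is not shown on this page. A minimal sketch of such a helper, assuming this Presto version's ConnectorSplitSource API (isFinished() plus getNextBatch(int) returning a CompletableFuture of splits) and Guava's ImmutableList, could look like the following; the batch size of 1000 is arbitrary:

// Hypothetical helper: drains a ConnectorSplitSource into a list, one batch at a time.
private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        // Block until the next batch is available; 1000 is an arbitrary per-batch limit.
        splits.addAll(splitSource.getNextBatch(1000).join());
    }
    return splits.build();
}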
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb.
Class NodePartitioningManager, method getNodePartitioningMap:
public NodePartitionMap getNodePartitioningMap(Session session, PartitioningHandle partitioningHandle)
{
    requireNonNull(session, "session is null");
    requireNonNull(partitioningHandle, "partitioningHandle is null");

    if (partitioningHandle.getConnectorHandle() instanceof SystemPartitioningHandle) {
        return ((SystemPartitioningHandle) partitioningHandle.getConnectorHandle()).getNodePartitionMap(session, nodeScheduler);
    }

    ConnectorNodePartitioningProvider partitioningProvider = partitioningProviders.get(partitioningHandle.getConnectorId().get());
    checkArgument(partitioningProvider != null, "No partitioning provider for connector %s", partitioningHandle.getConnectorId().get());

    Map<Integer, Node> bucketToNode = partitioningProvider.getBucketToNode(partitioningHandle.getTransactionHandle().orElse(null), session.toConnectorSession(), partitioningHandle.getConnectorHandle());
    checkArgument(bucketToNode != null, "No partition map %s", partitioningHandle);
    checkArgument(!bucketToNode.isEmpty(), "Partition map %s is empty", partitioningHandle);

    int bucketCount = bucketToNode.keySet().stream().mapToInt(Integer::intValue).max().getAsInt() + 1;

    // safety check for crazy partitioning
    checkArgument(bucketCount < 1_000_000, "Too many buckets in partitioning: %s", bucketCount);

    int[] bucketToPartition = new int[bucketCount];
    BiMap<Node, Integer> nodeToPartition = HashBiMap.create();
    int nextPartitionId = 0;
    for (Entry<Integer, Node> entry : bucketToNode.entrySet()) {
        Integer partitionId = nodeToPartition.get(entry.getValue());
        if (partitionId == null) {
            partitionId = nextPartitionId++;
            nodeToPartition.put(entry.getValue(), partitionId);
        }
        bucketToPartition[entry.getKey()] = partitionId;
    }

    ToIntFunction<ConnectorSplit> splitBucketFunction = partitioningProvider.getSplitBucketFunction(partitioningHandle.getTransactionHandle().orElse(null), session.toConnectorSession(), partitioningHandle.getConnectorHandle());
    checkArgument(splitBucketFunction != null, "No partitioning %s", partitioningHandle);

    return new NodePartitionMap(nodeToPartition.inverse(), bucketToPartition, split -> splitBucketFunction.applyAsInt(split.getConnectorSplit()));
}
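The middle loop is the interesting part: it assigns each distinct node a dense partition id and records, for every bucket, which partition that bucket maps to. A self-contained sketch of just that compaction step, with plain strings standing in for Node objects (hypothetical names, not the Presto SPI types):

// Hypothetical illustration of the bucket -> partition compaction.
Map<Integer, String> bucketToNode = ImmutableMap.of(0, "node-a", 1, "node-b", 2, "node-a");
int bucketCount = Collections.max(bucketToNode.keySet()) + 1;
int[] bucketToPartition = new int[bucketCount];
Map<String, Integer> nodeToPartition = new HashMap<>();
int nextPartitionId = 0;
for (Map.Entry<Integer, String> entry : bucketToNode.entrySet()) {
    Integer partitionId = nodeToPartition.get(entry.getValue());
    if (partitionId == null) {
        partitionId = nextPartitionId++;
        nodeToPartition.put(entry.getValue(), partitionId);
    }
    bucketToPartition[entry.getKey()] = partitionId;
}
// Result: node-a -> partition 0, node-b -> partition 1, bucketToPartition == {0, 1, 0}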
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb.
Class MemorySplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layoutHandle)
{
    MemoryTableLayoutHandle layout = (MemoryTableLayoutHandle) layoutHandle;
    List<HostAddress> hosts = layout.getTable().getHosts();
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();

    for (HostAddress host : hosts) {
        for (int i = 0; i < splitsPerNode; i++) {
            splits.add(new MemorySplit(layout.getTable(), i, splitsPerNode, ImmutableList.of(host)));
        }
    }
    return new FixedSplitSource(splits.build());
}
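Each host receives splitsPerNode splits, and every MemorySplit carries its part number and the total number of parts. A common way to consume such a (partNumber, totalParts) pair is modular assignment of the stored pages; the sketch below is a hypothetical illustration of that idea, not necessarily how the memory connector's page source is implemented:

// Hypothetical: pick the pages that belong to one split by modular assignment.
static List<Page> pagesForSplit(List<Page> allPages, int partNumber, int totalParts)
{
    ImmutableList.Builder<Page> selected = ImmutableList.builder();
    for (int i = 0; i < allPages.size(); i++) {
        if (i % totalParts == partNumber) {
            selected.add(allPages.get(i));
        }
    }
    return selected.build();
}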
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb.
Class SystemSplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    SystemTableLayoutHandle layoutHandle = (SystemTableLayoutHandle) layout;
    SystemTableHandle tableHandle = layoutHandle.getTable();
    TupleDomain<ColumnHandle> constraint = layoutHandle.getConstraint();

    SystemTable systemTable = tables.get(tableHandle.getSchemaTableName());
    Distribution tableDistributionMode = systemTable.getDistribution();
    if (tableDistributionMode == SINGLE_COORDINATOR) {
        HostAddress address = nodeManager.getCurrentNode().getHostAndPort();
        ConnectorSplit split = new SystemSplit(tableHandle.getConnectorId(), tableHandle, address, constraint);
        return new FixedSplitSource(ImmutableList.of(split));
    }

    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    ImmutableSet.Builder<Node> nodes = ImmutableSet.builder();
    if (tableDistributionMode == ALL_COORDINATORS) {
        nodes.addAll(nodeManager.getCoordinators());
    }
    else if (tableDistributionMode == ALL_NODES) {
        nodes.addAll(nodeManager.getNodes(ACTIVE));
    }
    Set<Node> nodeSet = nodes.build();
    for (Node node : nodeSet) {
        splits.add(new SystemSplit(tableHandle.getConnectorId(), tableHandle, node.getHostAndPort(), constraint));
    }
    return new FixedSplitSource(splits.build());
}
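The table's distribution mode determines which nodes receive a split: exactly one split on the current coordinator, one split per coordinator, or one split per active node. The following is a minimal, hypothetical sketch of that selection with plain strings standing in for nodes (the enum and method names here are assumptions, not the SPI types):

// Hypothetical node selection by distribution mode.
enum Distribution { SINGLE_COORDINATOR, ALL_COORDINATORS, ALL_NODES }

static Set<String> selectNodes(Distribution mode, String currentCoordinator, Set<String> coordinators, Set<String> activeNodes)
{
    switch (mode) {
        case SINGLE_COORDINATOR:
            return ImmutableSet.of(currentCoordinator);
        case ALL_COORDINATORS:
            return ImmutableSet.copyOf(coordinators);
        case ALL_NODES:
            return ImmutableSet.copyOf(activeNodes);
        default:
            throw new IllegalArgumentException("Unknown distribution: " + mode);
    }
}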
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb.
Class KafkaSplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();
    SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));

    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);

    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
        for (PartitionMetadata part : metadata.partitionsMetadata()) {
            log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());

            Broker leader = part.leader();
            if (leader == null) {
                // Leader election going on...
                log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
                continue;
            }

            HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
            SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);

            // Kafka contains a reverse list of "end - start" pairs for the splits
            long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
            for (int i = offsets.length - 1; i > 0; i--) {
                KafkaSplit split = new KafkaSplit(connectorId, metadata.topic(), kafkaTableHandle.getKeyDataFormat(), kafkaTableHandle.getMessageDataFormat(), part.partitionId(), offsets[i], offsets[i - 1], partitionLeader);
                splits.add(split);
            }
        }
    }
    return new FixedSplitSource(splits.build());
}
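findAllOffsets returns the partition's segment boundaries in descending order, so iterating the array from the end backwards pairs each lower boundary with the next higher one, producing contiguous [start, end) ranges. A self-contained illustration of that pairing with made-up offsets (hypothetical values, no Kafka API involved):

// Hypothetical offsets, as returned in descending order (latest boundary first).
long[] offsets = {900, 600, 300, 0};
for (int i = offsets.length - 1; i > 0; i--) {
    long start = offsets[i];
    long end = offsets[i - 1];
    // Prints the ranges [0, 300), [300, 600), [600, 900)
    System.out.printf("split range: [%d, %d)%n", start, end);
}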