Use of io.trino.spi.connector.ConnectorSplit in project trino by trinodb.
From the class KinesisRecordSetProvider, method getRecordSet:
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns) {
    // Downcast the engine's opaque split to the connector-specific type.
    KinesisSplit kinesisSplit = (KinesisSplit) split;
    List<KinesisColumnHandle> kinesisColumns = columns.stream()
            .map(x -> (KinesisColumnHandle) x)
            .collect(toImmutableList());

    // The message decoder only sees real data columns, not internal ones.
    RowDecoder messageDecoder = decoderFactory.create(
            kinesisSplit.getMessageDataFormat(),
            new HashMap<>(),
            kinesisColumns.stream()
                    .filter(column -> !column.isInternal())
                    .collect(toImmutableSet()));

    // Note: this rebuilds the same list as kinesisColumns above.
    ImmutableList.Builder<KinesisColumnHandle> handleBuilder = ImmutableList.builder();
    for (ColumnHandle handle : columns) {
        handleBuilder.add((KinesisColumnHandle) handle);
    }

    return new KinesisRecordSet(kinesisSplit, session, clientManager, handleBuilder.build(), messageDecoder, kinesisConfig);
}
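Every getRecordSet and getSplits implementation on this page starts from the same contract: the engine hands the connector an opaque ConnectorSplit, and the connector downcasts it to its own split type. For orientation, a rough sketch of the core ConnectorSplit interface follows; this is a paraphrase, and the exact methods and defaults vary between Trino SPI versions.

public interface ConnectorSplit {
    // true: the split may run on any worker; false: it must run on one of getAddresses().
    boolean isRemotelyAccessible();

    // Hosts this split prefers (or, when not remotely accessible, requires).
    List<HostAddress> getAddresses();

    // Free-form diagnostic information about the split.
    Object getInfo();
}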
Use of io.trino.spi.connector.ConnectorSplit in project trino by trinodb.
From the class KafkaRecordSetProvider, method getRecordSet:
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns) {
    KafkaSplit kafkaSplit = (KafkaSplit) split;
    List<KafkaColumnHandle> kafkaColumns = columns.stream()
            .map(KafkaColumnHandle.class::cast)
            .collect(toImmutableList());

    // Key and message get separate decoders: key columns feed the key decoder,
    // all remaining non-internal columns feed the message decoder.
    RowDecoder keyDecoder = decoderFactory.create(
            kafkaSplit.getKeyDataFormat(),
            getDecoderParameters(kafkaSplit.getKeyDataSchemaContents()),
            kafkaColumns.stream()
                    .filter(col -> !col.isInternal())
                    .filter(KafkaColumnHandle::isKeyCodec)
                    .collect(toImmutableSet()));
    RowDecoder messageDecoder = decoderFactory.create(
            kafkaSplit.getMessageDataFormat(),
            getDecoderParameters(kafkaSplit.getMessageDataSchemaContents()),
            kafkaColumns.stream()
                    .filter(col -> !col.isInternal())
                    .filter(col -> !col.isKeyCodec())
                    .collect(toImmutableSet()));

    return new KafkaRecordSet(kafkaSplit, consumerFactory, session, kafkaColumns, keyDecoder, messageDecoder);
}
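The Kafka snippet calls a getDecoderParameters helper that is not shown on this page. A minimal sketch of what such a helper plausibly does, assuming its only job is to pass the optional schema contents to the decoder under a dataSchema key (the key name is an assumption):

private static Map<String, String> getDecoderParameters(Optional<String> dataSchema) {
    // Expose the schema (e.g. an Avro schema string) to the decoder, if one is configured.
    ImmutableMap.Builder<String, String> parameters = ImmutableMap.builder();
    dataSchema.ifPresent(schema -> parameters.put("dataSchema", schema));
    return parameters.build();
}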
Use of io.trino.spi.connector.ConnectorSplit in project trino by trinodb.
From the class TpcdsSplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter) {
    Set<Node> nodes = nodeManager.getRequiredWorkerNodes();
    checkState(!nodes.isEmpty(), "No TPCDS nodes available");

    int totalParts = nodes.size() * splitsPerNode;
    int partNumber = 0;

    // Distribute the data evenly: splitsPerNode splits for each available worker node.
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (Node node : nodes) {
        for (int i = 0; i < splitsPerNode; i++) {
            splits.add(new TpcdsSplit(partNumber, totalParts, ImmutableList.of(node.getHostAndPort()), noSexism));
            partNumber++;
        }
    }
    return new FixedSplitSource(splits.build());
}
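TpcdsSplit pins each unit of work to one worker through its address list. A hypothetical minimal split in the same spirit (class and field names are illustrative, not the project's actual implementation; assumes io.trino.spi.HostAddress and com.google.common.collect.ImmutableList):

public class ExampleSplit implements ConnectorSplit {
    private final int partNumber;
    private final int totalParts;
    private final List<HostAddress> addresses;

    public ExampleSplit(int partNumber, int totalParts, List<HostAddress> addresses) {
        this.partNumber = partNumber;
        this.totalParts = totalParts;
        this.addresses = ImmutableList.copyOf(addresses);
    }

    @Override
    public boolean isRemotelyAccessible() {
        // false: the scheduler must place this split on one of the addresses below.
        return false;
    }

    @Override
    public List<HostAddress> getAddresses() {
        return addresses;
    }

    @Override
    public Object getInfo() {
        return "part " + partNumber + " of " + totalParts;
    }
}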
Use of io.trino.spi.connector.ConnectorSplit in project trino by trinodb.
From the class TpchSplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle table, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter) {
    Set<Node> nodes = nodeManager.getRequiredWorkerNodes();

    int totalParts = nodes.size() * splitsPerNode;
    int partNumber = 0;

    // Distribute the data evenly: splitsPerNode splits for each available worker node.
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (Node node : nodes) {
        for (int i = 0; i < splitsPerNode; i++) {
            splits.add(new TpchSplit(partNumber, totalParts, ImmutableList.of(node.getHostAndPort())));
            partNumber++;
        }
    }
    return new FixedSplitSource(splits.build());
}
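For concreteness, the partitioning arithmetic can be checked on its own. With hypothetical values of three workers and splitsPerNode = 4, the nested loops give each node a contiguous block of part numbers:

// Standalone sanity check of the loop above (hypothetical values).
List<String> nodes = List.of("worker-1", "worker-2", "worker-3");
int splitsPerNode = 4;
int totalParts = nodes.size() * splitsPerNode; // 12
int partNumber = 0;
for (String node : nodes) {
    for (int i = 0; i < splitsPerNode; i++) {
        // worker-1 takes parts 0-3, worker-2 takes 4-7, worker-3 takes 8-11
        System.out.printf("%s -> part %d of %d%n", node, partNumber, totalParts);
        partNumber++;
    }
}

FixedSplitSource then simply serves this precomputed list to the scheduler, which is why both TPC connectors can enumerate every split up front.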