Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
Class DeltaLakeConnector, method beginTransaction:
@Override
public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit)
{
    checkConnectorSupports(READ_COMMITTED, isolationLevel);
    verify(autoCommit, "Catalog only supports writes using autocommit: DeltaLake");
    ConnectorTransactionHandle transaction = new HiveTransactionHandle(true);
    transactionManager.begin(transaction);
    return transaction;
}
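ConnectorTransactionHandle is a marker interface, so connectors that keep no per-transaction state can use a stateless singleton. A minimal sketch; the ExampleTransactionHandle name is hypothetical, not a class in Trino:

import io.trino.spi.connector.ConnectorTransactionHandle;

// Hypothetical stateless handle: ConnectorTransactionHandle declares no methods,
// so an enum singleton is sufficient when nothing needs to be tracked per transaction.
public enum ExampleTransactionHandle
        implements ConnectorTransactionHandle
{
    INSTANCE
}

On the engine side the call sequence against the Connector SPI is roughly connector.beginTransaction(READ_COMMITTED, false, true) followed by connector.commit(handle) or connector.rollback(handle); the DeltaLake override above additionally rejects anything other than autocommit.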
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
Class KafkaSplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle table,
        SplitSchedulingStrategy splitSchedulingStrategy,
        DynamicFilter dynamicFilter)
{
    KafkaTableHandle kafkaTableHandle = (KafkaTableHandle) table;
    try (KafkaConsumer<byte[], byte[]> kafkaConsumer = consumerFactory.create(session)) {
        List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(kafkaTableHandle.getTopicName());
        List<TopicPartition> topicPartitions = partitionInfos.stream()
                .map(KafkaSplitManager::toTopicPartition)
                .collect(toImmutableList());
        Map<TopicPartition, Long> partitionBeginOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
        Map<TopicPartition, Long> partitionEndOffsets = kafkaConsumer.endOffsets(topicPartitions);
        KafkaFilteringResult kafkaFilteringResult = kafkaFilterManager.getKafkaFilterResult(
                session, kafkaTableHandle, partitionInfos, partitionBeginOffsets, partitionEndOffsets);
        partitionInfos = kafkaFilteringResult.getPartitionInfos();
        partitionBeginOffsets = kafkaFilteringResult.getPartitionBeginOffsets();
        partitionEndOffsets = kafkaFilteringResult.getPartitionEndOffsets();
        ImmutableList.Builder<KafkaSplit> splits = ImmutableList.builder();
        Optional<String> keyDataSchemaContents = contentSchemaReader.readKeyContentSchema(kafkaTableHandle);
        Optional<String> messageDataSchemaContents = contentSchemaReader.readValueContentSchema(kafkaTableHandle);
        for (PartitionInfo partitionInfo : partitionInfos) {
            TopicPartition topicPartition = toTopicPartition(partitionInfo);
            HostAddress leader = HostAddress.fromParts(partitionInfo.leader().host(), partitionInfo.leader().port());
            new Range(partitionBeginOffsets.get(topicPartition), partitionEndOffsets.get(topicPartition))
                    .partition(messagesPerSplit).stream()
                    .map(range -> new KafkaSplit(
                            kafkaTableHandle.getTopicName(),
                            kafkaTableHandle.getKeyDataFormat(),
                            kafkaTableHandle.getMessageDataFormat(),
                            keyDataSchemaContents,
                            messageDataSchemaContents,
                            partitionInfo.partition(),
                            range,
                            leader))
                    .forEach(splits::add);
        }
        return new FixedSplitSource(splits.build());
    }
    catch (Exception e) {
        // Catch all exceptions because the Kafka client library is written in Scala and checked exceptions are not declared in the method signature.
        if (e instanceof TrinoException) {
            throw e;
        }
        throw new TrinoException(
                KAFKA_SPLIT_ERROR,
                format("Cannot list splits for table '%s' reading topic '%s'", kafkaTableHandle.getTableName(), kafkaTableHandle.getTopicName()),
                e);
    }
}
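The Range and its partition(messagesPerSplit) call above are Kafka-connector internals; a minimal standalone sketch of the same idea, splitting a half-open [begin, end) offset range into chunks of at most messagesPerSplit offsets (the OffsetRange name is illustrative, not the connector's class):

import java.util.ArrayList;
import java.util.List;

// Illustrative stand-in for the connector's Range: a half-open offset interval [begin, end).
record OffsetRange(long begin, long end)
{
    // Split into consecutive chunks of at most messagesPerSplit offsets each.
    List<OffsetRange> partition(long messagesPerSplit)
    {
        List<OffsetRange> chunks = new ArrayList<>();
        for (long start = begin; start < end; start += messagesPerSplit) {
            chunks.add(new OffsetRange(start, Math.min(start + messagesPerSplit, end)));
        }
        return chunks;
    }
}

For example, new OffsetRange(0, 2500).partition(1000) yields [0, 1000), [1000, 2000) and [2000, 2500), which is why a single Kafka partition can contribute several splits.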
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
Class TestRaptorConnector, method createTable:
private long createTable(String name)
{
    ConnectorTransactionHandle transaction = beginTransaction();
    connector.getMetadata(SESSION, transaction).createTable(
            SESSION,
            new ConnectorTableMetadata(
                    new SchemaTableName("test", name),
                    ImmutableList.of(new ColumnMetadata("id", BIGINT))),
            false);
    connector.commit(transaction);

    transaction = beginTransaction();
    ConnectorTableHandle tableHandle = getTableHandle(connector.getMetadata(SESSION, transaction), name);
    connector.commit(transaction);
    return ((RaptorTableHandle) tableHandle).getTableId();
}
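The beginTransaction() helper is not part of this excerpt; assuming it simply opens an autocommit READ_COMMITTED transaction on the connector under test via the Connector SPI method shown earlier, it would look roughly like this (a sketch, not the test's actual code):

private ConnectorTransactionHandle beginTransaction()
{
    // Assumed: read-write, autocommit transaction at READ_COMMITTED.
    return connector.beginTransaction(IsolationLevel.READ_COMMITTED, false, true);
}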
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
Class TestRaptorConnector, method assertSplitShard:
private void assertSplitShard(Type temporalType, String min, String max, int expectedSplits)
        throws Exception
{
    ConnectorSession session = TestingConnectorSession.builder()
            .setPropertyMetadata(new RaptorSessionProperties(new StorageManagerConfig()).getSessionProperties())
            .build();

    ConnectorTransactionHandle transaction = beginTransaction();
    connector.getMetadata(SESSION, transaction).createTable(
            SESSION,
            new ConnectorTableMetadata(
                    new SchemaTableName("test", "test"),
                    ImmutableList.of(new ColumnMetadata("id", BIGINT), new ColumnMetadata("time", temporalType)),
                    ImmutableMap.of(TEMPORAL_COLUMN_PROPERTY, "time")),
            false);
    connector.commit(transaction);

    ConnectorTransactionHandle txn1 = beginTransaction();
    ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(SESSION, txn1), "test");
    ConnectorInsertTableHandle insertTableHandle = connector.getMetadata(SESSION, txn1).beginInsert(session, handle1);
    ConnectorPageSink raptorPageSink = connector.getPageSinkProvider().createPageSink(txn1, session, insertTableHandle);

    Object timestamp1 = null;
    Object timestamp2 = null;
    if (temporalType.equals(TIMESTAMP_MILLIS)) {
        timestamp1 = SqlTimestamp.newInstance(3, castToShortTimestamp(TIMESTAMP_MILLIS.getPrecision(), min), 0);
        timestamp2 = SqlTimestamp.newInstance(3, castToShortTimestamp(TIMESTAMP_MILLIS.getPrecision(), max), 0);
    }
    else if (temporalType.equals(DATE)) {
        timestamp1 = new SqlDate(parseDate(min));
        timestamp2 = new SqlDate(parseDate(max));
    }

    Page inputPage = MaterializedResult.resultBuilder(session, ImmutableList.of(BIGINT, temporalType))
            .row(1L, timestamp1)
            .row(2L, timestamp2)
            .build()
            .toPage();
    raptorPageSink.appendPage(inputPage);

    Collection<Slice> shards = raptorPageSink.finish().get();
    assertEquals(shards.size(), expectedSplits);
    connector.getMetadata(session, txn1).dropTable(session, handle1);
    connector.commit(txn1);
}
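A hedged example of how this helper might be invoked; the bounds and expected split counts are illustrative values, not the original test data:

// Illustrative only: two rows whose temporal values fall in different ranges
// are expected to be written to separate shards.
assertSplitShard(DATE, "2019-01-02", "2019-02-02", 2);
assertSplitShard(TIMESTAMP_MILLIS, "2019-01-02 03:04:05.123", "2019-02-02 03:04:05.123", 2);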
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
Class IcebergPageSourceProvider, method createPageSource:
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit connectorSplit,
        ConnectorTableHandle connectorTable, List<ColumnHandle> columns, DynamicFilter dynamicFilter)
{
    IcebergSplit split = (IcebergSplit) connectorSplit;
    IcebergTableHandle table = (IcebergTableHandle) connectorTable;
    List<IcebergColumnHandle> icebergColumns = columns.stream().map(IcebergColumnHandle.class::cast).collect(toImmutableList());
    Map<Integer, Optional<String>> partitionKeys = split.getPartitionKeys();
    List<IcebergColumnHandle> regularColumns = columns.stream()
            .map(IcebergColumnHandle.class::cast)
            .filter(column -> !partitionKeys.containsKey(column.getId()))
            .collect(toImmutableList());
    TupleDomain<IcebergColumnHandle> effectivePredicate = table.getUnenforcedPredicate()
            .intersect(dynamicFilter.getCurrentPredicate().transformKeys(IcebergColumnHandle.class::cast))
            .simplify(ICEBERG_DOMAIN_COMPACTION_THRESHOLD);
    HdfsContext hdfsContext = new HdfsContext(session);
    ReaderPageSource dataPageSource = createDataPageSource(
            session, hdfsContext, new Path(split.getPath()), split.getStart(), split.getLength(), split.getFileSize(),
            split.getFileFormat(), regularColumns, effectivePredicate, table.getNameMappingJson().map(NameMappingParser::fromJson));
    Optional<ReaderProjectionsAdapter> projectionsAdapter = dataPageSource.getReaderColumns()
            .map(readerColumns -> new ReaderProjectionsAdapter(regularColumns, readerColumns,
                    column -> ((IcebergColumnHandle) column).getType(), IcebergPageSourceProvider::applyProjection));
    return new IcebergPageSource(icebergColumns, partitionKeys, dataPageSource.get(), projectionsAdapter);
}
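The effective predicate above is the table's unenforced predicate intersected with the dynamic filter's current predicate, then compacted with simplify. A minimal sketch of that combination on a single BIGINT column using io.trino.spi.predicate directly; the String column keys and the threshold of 100 are stand-ins for IcebergColumnHandle and ICEBERG_DOMAIN_COMPACTION_THRESHOLD:

import com.google.common.collect.ImmutableMap;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.predicate.ValueSet;

import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;

public class EffectivePredicateSketch
{
    public static void main(String[] args)
    {
        // Table-level unenforced predicate: id > 0.
        TupleDomain<String> unenforced = TupleDomain.withColumnDomains(ImmutableMap.of(
                "id", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 0L)), false)));

        // Dynamic filter's current predicate: id IN (1, 2, 3).
        TupleDomain<String> dynamic = TupleDomain.withColumnDomains(ImmutableMap.of(
                "id", Domain.multipleValues(BIGINT, List.of(1L, 2L, 3L))));

        // Intersect the two and cap the number of discrete ranges kept per column.
        TupleDomain<String> effective = unenforced.intersect(dynamic).simplify(100);
        System.out.println(effective);
    }
}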