Use of io.trino.spi.connector.ConnectorSplitSource in project trino by trinodb, from the class TestRaptorSplitManager, method testNoHostForShard.
@Test(expectedExceptions = TrinoException.class, expectedExceptionsMessageRegExp = "No host for shard .* found: \\[\\]")
public void testNoHostForShard()
{
    deleteShardNodes();

    ConnectorSplitSource splitSource = getSplits(raptorSplitManager, tableHandle);
    getSplits(splitSource, 1000);
}
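The getSplits(splitSource, 1000) call drains the source through the batch API; the helper itself is not shown on this page. A minimal sketch of what it might look like, assuming static imports of NOT_PARTITIONED (io.trino.spi.connector.NotPartitionedPartitionHandle) and getFutureValue (io.airlift.concurrent.MoreFutures):

// Hypothetical drain helper: pull batches until the source reports it is finished.
private static List<ConnectorSplit> getSplits(ConnectorSplitSource splitSource, int maxBatchSize)
{
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        splits.addAll(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, maxBatchSize)).getSplits());
    }
    return splits.build();
}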
Use of io.trino.spi.connector.ConnectorSplitSource in project trino by trinodb, from the class AbstractTestHive, method testPartitionSchemaNonCanonical.
// TODO coercion of non-canonical values should be supported
@Test(enabled = false)
public void testPartitionSchemaNonCanonical()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();

        ConnectorTableHandle table = getTableHandle(metadata, tablePartitionSchemaChangeNonCanonical);
        ColumnHandle column = metadata.getColumnHandles(session, table).get("t_boolean");

        // Push a filter on t_boolean and verify it prunes to the single non-canonical partition
        Constraint constraint = new Constraint(TupleDomain.fromFixedValues(ImmutableMap.of(column, NullableValue.of(BOOLEAN, false))));
        table = applyFilter(metadata, table, constraint);
        HivePartition partition = getOnlyElement(((HiveTableHandle) table).getPartitions().orElseThrow(AssertionError::new));
        assertEquals(getPartitionId(partition), "t_boolean=0");

        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, table);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));

        // Reading the partition must fail, since "0" is not a canonical boolean value
        ImmutableList<ColumnHandle> columnHandles = ImmutableList.of(column);
        try (ConnectorPageSource ignored = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, table, columnHandles, DynamicFilter.EMPTY)) {
            fail("expected exception");
        }
        catch (TrinoException e) {
            assertEquals(e.getErrorCode(), HIVE_INVALID_PARTITION_VALUE.toErrorCode());
        }
    }
}
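The applyFilter helper used above is not shown here. A plausible sketch, assuming it delegates to ConnectorMetadata.applyFilter and unwraps the resulting ConstraintApplicationResult (newSession() is the same test helper used above):

// Hypothetical helper: push the constraint into the connector, failing if nothing was applied.
private static ConnectorTableHandle applyFilter(ConnectorMetadata metadata, ConnectorTableHandle table, Constraint constraint)
{
    return metadata.applyFilter(newSession(), table, constraint)
            .map(ConstraintApplicationResult::getHandle)
            .orElseThrow(AssertionError::new);
}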
Use of io.trino.spi.connector.ConnectorSplitSource in project trino by trinodb, from the class SplitManager, method getSplits.
public SplitSource getSplits(Session session, TableHandle table, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter, Constraint constraint)
{
    CatalogName catalogName = table.getCatalogName();
    ConnectorSplitManager splitManager = getConnectorSplitManager(catalogName);

    // Drop the dynamic filter when the session disallows pushdown into connectors
    if (!isAllowPushdownIntoConnectors(session)) {
        dynamicFilter = DynamicFilter.EMPTY;
    }

    ConnectorSession connectorSession = session.toConnectorSession(catalogName);
    ConnectorSplitSource source = splitManager.getSplits(
            table.getTransaction(),
            connectorSession,
            table.getConnectorHandle(),
            splitSchedulingStrategy,
            dynamicFilter,
            constraint);

    SplitSource splitSource = new ConnectorAwareSplitSource(catalogName, source);
    // Buffer small batches so the scheduler sees at least minScheduleSplitBatchSize splits at a time
    if (minScheduleSplitBatchSize > 1) {
        splitSource = new BufferingSplitSource(splitSource, minScheduleSplitBatchSize);
    }
    return splitSource;
}
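A hypothetical caller-side invocation of this method, assuming ungrouped scheduling, no dynamic filtering, and no extra constraint:

// Sketch: obtain a buffered split source for a plain table scan.
SplitSource splitSource = splitManager.getSplits(
        session,
        tableHandle,
        SplitSchedulingStrategy.UNGROUPED_SCHEDULING,
        DynamicFilter.EMPTY,
        Constraint.alwaysTrue());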
Use of io.trino.spi.connector.ConnectorSplitSource in project trino by trinodb, from the class AbstractTestHiveFileSystem, method readTable.
protected MaterializedResult readTable(SchemaTableName tableName)
        throws IOException
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        ConnectorTableHandle table = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, table).values());

        metadata.beginQuery(session);
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, table);

        List<Type> allTypes = getTypes(columnHandles);
        List<Type> dataTypes = getTypes(columnHandles.stream()
                .filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden())
                .collect(toImmutableList()));
        MaterializedResult.Builder result = MaterializedResult.resultBuilder(session, dataTypes);

        List<ConnectorSplit> splits = getAllSplits(splitSource);
        for (ConnectorSplit split : splits) {
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, table, columnHandles, DynamicFilter.EMPTY)) {
                MaterializedResult pageSourceResult = materializeSourceDataStream(session, pageSource, allTypes);
                for (MaterializedRow row : pageSourceResult.getMaterializedRows()) {
                    // Keep only the visible (non-hidden) columns in the materialized output
                    Object[] dataValues = IntStream.range(0, row.getFieldCount())
                            .filter(channel -> !((HiveColumnHandle) columnHandles.get(channel)).isHidden())
                            .mapToObj(row::getField)
                            .toArray();
                    result.row(dataValues);
                }
            }
        }

        metadata.cleanupQuery(session);
        return result.build();
    }
}
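The getTypes helper maps column handles to their Trino types. A minimal sketch, assuming every handle in this Hive test is a HiveColumnHandle:

// Hypothetical helper: extract the Type of each column handle.
private static List<Type> getTypes(List<ColumnHandle> columnHandles)
{
    return columnHandles.stream()
            .map(columnHandle -> ((HiveColumnHandle) columnHandle).getType())
            .collect(toImmutableList());
}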
Use of io.trino.spi.connector.ConnectorSplitSource in project trino by trinodb, from the class TestJdbcRecordSetProvider, method getCursor.
private RecordCursor getCursor(JdbcTableHandle jdbcTableHandle, List<JdbcColumnHandle> columns, TupleDomain<ColumnHandle> domain)
{
    // Rebuild the table handle with the supplied domain as its pushed-down constraint
    jdbcTableHandle = new JdbcTableHandle(jdbcTableHandle.getRelationHandle(), domain, ImmutableList.of(), Optional.empty(), OptionalLong.empty(), Optional.empty(), jdbcTableHandle.getOtherReferencedTables(), jdbcTableHandle.getNextSyntheticColumnId());

    ConnectorSplitSource splits = jdbcClient.getSplits(SESSION, jdbcTableHandle);
    JdbcSplit split = (JdbcSplit) getOnlyElement(getFutureValue(splits.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());

    ConnectorTransactionHandle transaction = new JdbcTransactionHandle();
    JdbcRecordSetProvider recordSetProvider = new JdbcRecordSetProvider(jdbcClient, executor);
    RecordSet recordSet = recordSetProvider.getRecordSet(transaction, SESSION, split, jdbcTableHandle, columns);
    return recordSet.cursor();
}
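A hypothetical use of the returned cursor; tableHandle and textColumn are stand-ins for handles built elsewhere in the test:

// Sketch: scan all rows for one varchar column with no pushed-down domain.
RecordCursor cursor = getCursor(tableHandle, ImmutableList.of(textColumn), TupleDomain.all());
try {
    while (cursor.advanceNextPosition()) {
        String value = cursor.getSlice(0).toStringUtf8();
        // assert on value ...
    }
}
finally {
    cursor.close();
}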