Use of io.trino.spi.connector.ConnectorTableProperties in project trino by trinodb.
The class AbstractTestHive, method setupHive.
protected void setupHive(String databaseName) {
    database = databaseName;
    tablePartitionFormat = new SchemaTableName(database, "trino_test_partition_format");
    tableUnpartitioned = new SchemaTableName(database, "trino_test_unpartitioned");
    tableOffline = new SchemaTableName(database, "trino_test_offline");
    tableOfflinePartition = new SchemaTableName(database, "trino_test_offline_partition");
    tableNotReadable = new SchemaTableName(database, "trino_test_not_readable");
    view = new SchemaTableName(database, "trino_test_view");
    invalidTable = new SchemaTableName(database, INVALID_TABLE);
    tableBucketedStringInt = new SchemaTableName(database, "trino_test_bucketed_by_string_int");
    tableBucketedBigintBoolean = new SchemaTableName(database, "trino_test_bucketed_by_bigint_boolean");
    tableBucketedDoubleFloat = new SchemaTableName(database, "trino_test_bucketed_by_double_float");
    tablePartitionSchemaChange = new SchemaTableName(database, "trino_test_partition_schema_change");
    tablePartitionSchemaChangeNonCanonical = new SchemaTableName(database, "trino_test_partition_schema_change_non_canonical");
    tableBucketEvolution = new SchemaTableName(database, "trino_test_bucket_evolution");

    invalidTableHandle = new HiveTableHandle(database, INVALID_TABLE, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());

    dsColumn = createBaseColumn("ds", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    fileFormatColumn = createBaseColumn("file_format", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    dummyColumn = createBaseColumn("dummy", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    intColumn = createBaseColumn("t_int", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    invalidColumnHandle = createBaseColumn(INVALID_COLUMN, 0, HIVE_STRING, VARCHAR, REGULAR, Optional.empty());

    List<ColumnHandle> partitionColumns = ImmutableList.of(dsColumn, fileFormatColumn, dummyColumn);
    tablePartitionFormatPartitions = ImmutableList.<HivePartition>builder()
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=textfile/dummy=1",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("textfile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 1L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=sequencefile/dummy=2",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("sequencefile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 2L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=rctext/dummy=3",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rctext")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 3L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=rcbinary/dummy=4",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rcbinary")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 4L))
                            .buildOrThrow()))
            .build();
    tableUnpartitionedPartitions = ImmutableList.of(new HivePartition(tableUnpartitioned));
    tablePartitionFormatProperties = new ConnectorTableProperties(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                    fileFormatColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(createUnboundedVarcharType(), utf8Slice("textfile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rctext")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                    dummyColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(INTEGER, 1L),
                            Range.equal(INTEGER, 2L),
                            Range.equal(INTEGER, 3L),
                            Range.equal(INTEGER, 4L)), false))),
            Optional.empty(),
            Optional.empty(),
            Optional.of(new DiscretePredicates(partitionColumns, ImmutableList.of(
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("textfile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 1L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 2L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rctext"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 3L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 4L)), false)))))),
            ImmutableList.of());
    tableUnpartitionedProperties = new ConnectorTableProperties();
}
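For reference, a minimal sketch of the same five-argument ConnectorTableProperties constructor in isolation; the propertiesFor helper and its lone dsColumn parameter are hypothetical, not part of AbstractTestHive:

import static io.airlift.slice.Slices.utf8Slice;
import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.connector.ConnectorTableProperties;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;
import java.util.Optional;

// Hypothetical helper: dsColumn is any partition column handle the connector owns.
static ConnectorTableProperties propertiesFor(ColumnHandle dsColumn) {
    return new ConnectorTableProperties(
            // enforced predicate: ds = '2012-12-29'
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    dsColumn, Domain.singleValue(createUnboundedVarcharType(), utf8Slice("2012-12-29")))),
            Optional.empty(),    // table partitioning
            Optional.empty(),    // stream partitioning columns
            Optional.empty(),    // discrete predicates
            ImmutableList.of()); // local properties
}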
Use of io.trino.spi.connector.ConnectorTableProperties in project trino by trinodb.
The class TestPushProjectionIntoTableScan, method createMockFactory.
private MockConnectorFactory createMockFactory(Map<String, ColumnHandle> assignments, Optional<MockConnectorFactory.ApplyProjection> applyProjection) {
    List<ColumnMetadata> metadata = assignments.entrySet().stream()
            .map(entry -> new ColumnMetadata(entry.getKey(), ((TpchColumnHandle) entry.getValue()).getType()))
            .collect(toImmutableList());
    MockConnectorFactory.Builder builder = MockConnectorFactory.builder()
            .withListSchemaNames(connectorSession -> ImmutableList.of(TEST_SCHEMA))
            .withListTables((connectorSession, schema) -> TEST_SCHEMA.equals(schema) ? ImmutableList.of(TEST_SCHEMA_TABLE) : ImmutableList.of())
            .withGetColumns(schemaTableName -> metadata)
            .withGetTableProperties((session, tableHandle) -> {
                MockConnectorTableHandle mockTableHandle = (MockConnectorTableHandle) tableHandle;
                if (mockTableHandle.getTableName().getTableName().equals(TEST_TABLE)) {
                    return new ConnectorTableProperties(
                            TupleDomain.all(),
                            Optional.of(new ConnectorTablePartitioning(PARTITIONING_HANDLE, ImmutableList.of(column("col", VARCHAR)))),
                            Optional.empty(),
                            Optional.empty(),
                            ImmutableList.of());
                }
                return new ConnectorTableProperties();
            });
    if (applyProjection.isPresent()) {
        builder = builder.withApplyProjection(applyProjection.get());
    }
    return builder.build();
}
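A hedged usage sketch of the factory above; the "mock" catalog name, the queryRunner variable, and the "col" assignment are assumptions for illustration:

// Build the factory with one TPCH-backed column and no projection pushdown.
Map<String, ColumnHandle> assignments = ImmutableMap.of("col", new TpchColumnHandle("col", VARCHAR));
MockConnectorFactory factory = createMockFactory(assignments, Optional.empty());
// Register it on a LocalQueryRunner, as the later examples do.
queryRunner.createCatalog("mock", factory, ImmutableMap.of());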
Use of io.trino.spi.connector.ConnectorTableProperties in project trino by trinodb.
The class TestPartialTopNWithPresortedInput, method createLocalQueryRunner.
@Override
protected LocalQueryRunner createLocalQueryRunner() {
    Session session = testSessionBuilder()
            .setCatalog(MOCK_CATALOG)
            .setSchema(TEST_SCHEMA)
            .build();
    LocalQueryRunner queryRunner = LocalQueryRunner.builder(session).build();
    MockConnectorFactory mockFactory = MockConnectorFactory.builder()
            .withGetTableProperties((connectorSession, handle) -> {
                MockConnectorTableHandle tableHandle = (MockConnectorTableHandle) handle;
                if (tableHandle.getTableName().equals(tableA)) {
                    return new ConnectorTableProperties(
                            TupleDomain.all(),
                            Optional.empty(),
                            Optional.empty(),
                            Optional.empty(),
                            ImmutableList.of(new SortingProperty<>(columnHandleA, ASC_NULLS_FIRST)));
                }
                throw new IllegalArgumentException();
            })
            .withGetColumns(schemaTableName -> {
                if (schemaTableName.equals(tableA)) {
                    return ImmutableList.of(
                            new ColumnMetadata(columnNameA, VARCHAR),
                            new ColumnMetadata(columnNameB, VARCHAR));
                }
                throw new IllegalArgumentException();
            })
            .build();
    queryRunner.createCatalog(MOCK_CATALOG, mockFactory, ImmutableMap.of());
    return queryRunner;
}
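For contrast, a hedged sketch of the same getTableProperties binding advertising a descending sort instead; SortOrder and SortingProperty live in io.trino.spi.connector, and columnHandleA is assumed in scope as above:

// Declare the data pre-sorted by columnHandleA descending, nulls last.
return new ConnectorTableProperties(
        TupleDomain.all(),
        Optional.empty(),
        Optional.empty(),
        Optional.empty(),
        ImmutableList.of(new SortingProperty<>(columnHandleA, SortOrder.DESC_NULLS_LAST)));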
Use of io.trino.spi.connector.ConnectorTableProperties in project trino by trinodb.
The class TestValidateLimitWithPresortedInput, method createLocalQueryRunner.
@Override
protected LocalQueryRunner createLocalQueryRunner() {
    Session session = testSessionBuilder()
            .setCatalog(MOCK_CATALOG)
            .setSchema(TEST_SCHEMA)
            .build();
    LocalQueryRunner queryRunner = LocalQueryRunner.builder(session).build();
    MockConnectorFactory mockFactory = MockConnectorFactory.builder()
            .withGetTableProperties((connectorSession, handle) -> {
                MockConnectorTableHandle tableHandle = (MockConnectorTableHandle) handle;
                if (tableHandle.getTableName().equals(MOCK_TABLE_NAME)) {
                    return new ConnectorTableProperties(
                            TupleDomain.all(),
                            Optional.empty(),
                            Optional.empty(),
                            Optional.empty(),
                            ImmutableList.of(
                                    new SortingProperty<>(COLUMN_HANDLE_A, ASC_NULLS_FIRST),
                                    new SortingProperty<>(COLUMN_HANDLE_C, ASC_NULLS_FIRST)));
                }
                throw new IllegalArgumentException();
            })
            .withGetColumns(schemaTableName -> {
                if (schemaTableName.equals(MOCK_TABLE_NAME)) {
                    return ImmutableList.of(
                            new ColumnMetadata(COLUMN_NAME_A, VARCHAR),
                            new ColumnMetadata(COLUMN_NAME_B, VARCHAR),
                            new ColumnMetadata(COLUMN_NAME_C, VARCHAR));
                }
                throw new IllegalArgumentException();
            })
            .build();
    queryRunner.createCatalog(MOCK_CATALOG, mockFactory, ImmutableMap.of());
    return queryRunner;
}
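List order in the local properties is significant: the snippet above advertises a lexicographic sort on (COLUMN_HANDLE_A, COLUMN_HANDLE_C). A hedged helper expressing the same declaration, with all names assumed:

// Hypothetical helper: lexicographic ascending sort on the two handles, in order.
static List<LocalProperty<ColumnHandle>> sortedAscNullsFirst(ColumnHandle first, ColumnHandle second) {
    return ImmutableList.of(
            new SortingProperty<>(first, ASC_NULLS_FIRST),
            new SortingProperty<>(second, ASC_NULLS_FIRST));
}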
Use of io.trino.spi.connector.ConnectorTableProperties in project trino by trinodb.
The class KuduMetadata, method getTableProperties.
@Override
public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle table) {
    KuduTableHandle handle = (KuduTableHandle) table;
    KuduTable kuduTable = handle.getTable(clientSession);
    Optional<ConnectorTablePartitioning> tablePartitioning = Optional.empty();
    Optional<Set<ColumnHandle>> partitioningColumns = Optional.empty();
    List<LocalProperty<ColumnHandle>> localProperties = ImmutableList.of();
    if (isKuduGroupedExecutionEnabled(session) && isTableSupportGroupedExecution(kuduTable)) {
        Map<String, ColumnHandle> columnMap = getColumnHandles(session, handle);
        List<Integer> bucketColumnIds = getBucketColumnIds(kuduTable);
        List<ColumnHandle> bucketColumns = getSpecifyColumns(kuduTable.getSchema(), bucketColumnIds, columnMap);
        Optional<List<KuduRangePartition>> kuduRangePartitions = getKuduRangePartitions(kuduTable);
        tablePartitioning = Optional.of(new ConnectorTablePartitioning(
                new KuduPartitioningHandle(
                        handle.getSchemaTableName().getSchemaName(),
                        handle.getSchemaTableName().getTableName(),
                        handle.getBucketCount().orElse(0),
                        bucketColumnIds,
                        bucketColumns.stream()
                                .map(KuduColumnHandle.class::cast)
                                .map(KuduColumnHandle::getType)
                                .collect(Collectors.toList()),
                        kuduRangePartitions),
                bucketColumns));
        partitioningColumns = Optional.of(ImmutableSet.copyOf(bucketColumns));
    }
    return new ConnectorTableProperties(handle.getConstraint(), tablePartitioning, partitioningColumns, Optional.empty(), localProperties);
}
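A hedged caller-side sketch of how these properties are read back through ConnectorMetadata; the metadata, session, and tableHandle variables are assumed in scope for illustration:

ConnectorTableProperties properties = metadata.getTableProperties(session, tableHandle);
properties.getTablePartitioning().ifPresent(partitioning -> {
    // Present only when Kudu grouped execution applies; carries the
    // KuduPartitioningHandle and the bucket columns built above.
    ConnectorPartitioningHandle partitioningHandle = partitioning.getPartitioningHandle();
    List<ColumnHandle> bucketColumns = partitioning.getPartitioningColumns();
});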