Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
The class HiveBucketing, method getHiveBuckets.
private static Optional<Set<Integer>> getHiveBuckets(HiveBucketProperty hiveBucketProperty, List<Column> dataColumns, Map<ColumnHandle, List<NullableValue>> bindings)
{
    if (bindings.isEmpty()) {
        return Optional.empty();
    }

    // Get the bucket column names
    List<String> bucketColumns = hiveBucketProperty.getBucketedBy();

    // Verify the bucket column types are supported
    Map<String, HiveType> hiveTypes = new HashMap<>();
    for (Column column : dataColumns) {
        hiveTypes.put(column.getName(), column.getType());
    }
    for (String column : bucketColumns) {
        if (!SUPPORTED_TYPES_FOR_BUCKET_FILTER.contains(hiveTypes.get(column))) {
            return Optional.empty();
        }
    }

    // Get bindings for bucket columns
    Map<String, List<NullableValue>> bucketBindings = new HashMap<>();
    for (Entry<ColumnHandle, List<NullableValue>> entry : bindings.entrySet()) {
        HiveColumnHandle columnHandle = (HiveColumnHandle) entry.getKey();
        if (bucketColumns.contains(columnHandle.getName())) {
            bucketBindings.put(columnHandle.getName(), entry.getValue());
        }
    }

    // Check that we have bindings for all bucket columns
    if (bucketBindings.size() != bucketColumns.size()) {
        return Optional.empty();
    }

    // Order the bucket column bindings according to the bucket column order
    List<List<NullableValue>> orderedBindings = bucketColumns.stream()
            .map(bucketBindings::get)
            .collect(toImmutableList());

    // Get TypeInfos for the bucket columns
    List<TypeInfo> typeInfos = bucketColumns.stream()
            .map(name -> hiveTypes.get(name).getTypeInfo())
            .collect(toImmutableList());

    return getHiveBuckets(hiveBucketProperty.getBucketingVersion(), hiveBucketProperty.getBucketCount(), typeInfos, orderedBindings);
}
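Since getHiveBuckets is private, any caller has to live in the same class. A minimal hypothetical sketch of such a caller for a table bucketed on a single bigint column; bucketProperty, dataColumns, and bucketColumn are made-up names standing in for values obtained from the table's metadata, not anything in the original source:

// Hypothetical helper in the same class as getHiveBuckets: prune buckets
// for a table bucketed on one bigint column.
static Optional<Set<Integer>> bucketsForValues(HiveBucketProperty bucketProperty, List<Column> dataColumns, HiveColumnHandle bucketColumn)
{
    // Candidate values for the bucket column, e.g. from an IN predicate.
    List<NullableValue> candidateValues = ImmutableList.of(
            NullableValue.of(BIGINT, 42L),
            NullableValue.of(BIGINT, 87L));
    Map<ColumnHandle, List<NullableValue>> bindings = ImmutableMap.of(bucketColumn, candidateValues);
    // Each candidate value is hashed with the table's bucketing function;
    // the result is the set of buckets that can contain matching rows, or
    // Optional.empty() when pruning is not possible.
    return getHiveBuckets(bucketProperty, dataColumns, bindings);
}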
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
The class AbstractTestHive, method testBucketedTableBigintBoolean.
@SuppressWarnings("ConstantConditions")
@Test
public void testBucketedTableBigintBoolean()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);

        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableBucketedBigintBoolean);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);

        assertTableIsBucketed(tableHandle, transaction, session);

        ConnectorTableProperties properties = metadata.getTableProperties(
                newSession(ImmutableMap.of("propagate_table_scan_sorting_properties", true)),
                tableHandle);
        // trino_test_bucketed_by_bigint_boolean does not define sorting, therefore the local properties are empty
        assertTrue(properties.getLocalProperties().isEmpty());
        assertTrue(metadata.getTableProperties(newSession(), tableHandle).getLocalProperties().isEmpty());

        String testString = "test";
        Long testBigint = 89L;
        Boolean testBoolean = true;

        ImmutableMap<ColumnHandle, NullableValue> bindings = ImmutableMap.<ColumnHandle, NullableValue>builder()
                .put(columnHandles.get(columnIndex.get("t_string")), NullableValue.of(createUnboundedVarcharType(), utf8Slice(testString)))
                .put(columnHandles.get(columnIndex.get("t_bigint")), NullableValue.of(BIGINT, testBigint))
                .put(columnHandles.get(columnIndex.get("t_boolean")), NullableValue.of(BOOLEAN, testBoolean))
                .buildOrThrow();

        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.fromFixedValues(bindings), OptionalInt.of(1), Optional.empty());

        boolean rowFound = false;
        for (MaterializedRow row : result) {
            if (testString.equals(row.getField(columnIndex.get("t_string")))
                    && testBigint.equals(row.getField(columnIndex.get("t_bigint")))
                    && testBoolean.equals(row.getField(columnIndex.get("t_boolean")))) {
                rowFound = true;
                break;
            }
        }
        assertTrue(rowFound);
    }
}
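The key step above is TupleDomain.fromFixedValues, which turns point bindings into a connector predicate. A minimal sketch of that pattern in isolation; the types are real Trino SPI classes, but the column-handle parameters are hypothetical placeholders for handles obtained from connector metadata:

// Build a predicate that pins each column to exactly one value.
static TupleDomain<ColumnHandle> pointPredicate(ColumnHandle stringColumn, ColumnHandle bigintColumn)
{
    Map<ColumnHandle, NullableValue> fixedValues = ImmutableMap.of(
            stringColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("test")),
            bigintColumn, NullableValue.of(BIGINT, 89L));
    // NullableValue.asNull(type) could be used instead to constrain a
    // column to NULL rather than to a concrete value.
    return TupleDomain.fromFixedValues(fixedValues);
}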
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
The class AbstractTestHive, method setupHive.
protected void setupHive(String databaseName)
{
    database = databaseName;
    tablePartitionFormat = new SchemaTableName(database, "trino_test_partition_format");
    tableUnpartitioned = new SchemaTableName(database, "trino_test_unpartitioned");
    tableOffline = new SchemaTableName(database, "trino_test_offline");
    tableOfflinePartition = new SchemaTableName(database, "trino_test_offline_partition");
    tableNotReadable = new SchemaTableName(database, "trino_test_not_readable");
    view = new SchemaTableName(database, "trino_test_view");
    invalidTable = new SchemaTableName(database, INVALID_TABLE);
    tableBucketedStringInt = new SchemaTableName(database, "trino_test_bucketed_by_string_int");
    tableBucketedBigintBoolean = new SchemaTableName(database, "trino_test_bucketed_by_bigint_boolean");
    tableBucketedDoubleFloat = new SchemaTableName(database, "trino_test_bucketed_by_double_float");
    tablePartitionSchemaChange = new SchemaTableName(database, "trino_test_partition_schema_change");
    tablePartitionSchemaChangeNonCanonical = new SchemaTableName(database, "trino_test_partition_schema_change_non_canonical");
    tableBucketEvolution = new SchemaTableName(database, "trino_test_bucket_evolution");

    invalidTableHandle = new HiveTableHandle(database, INVALID_TABLE, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty());

    dsColumn = createBaseColumn("ds", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    fileFormatColumn = createBaseColumn("file_format", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty());
    dummyColumn = createBaseColumn("dummy", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    intColumn = createBaseColumn("t_int", -1, HIVE_INT, INTEGER, PARTITION_KEY, Optional.empty());
    invalidColumnHandle = createBaseColumn(INVALID_COLUMN, 0, HIVE_STRING, VARCHAR, REGULAR, Optional.empty());

    List<ColumnHandle> partitionColumns = ImmutableList.of(dsColumn, fileFormatColumn, dummyColumn);
    tablePartitionFormatPartitions = ImmutableList.<HivePartition>builder()
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=textfile/dummy=1",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("textfile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 1L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=sequencefile/dummy=2",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("sequencefile")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 2L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=rctext/dummy=3",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rctext")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 3L))
                            .buildOrThrow()))
            .add(new HivePartition(tablePartitionFormat,
                    "ds=2012-12-29/file_format=rcbinary/dummy=4",
                    ImmutableMap.<ColumnHandle, NullableValue>builder()
                            .put(dsColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2012-12-29")))
                            .put(fileFormatColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("rcbinary")))
                            .put(dummyColumn, NullableValue.of(INTEGER, 4L))
                            .buildOrThrow()))
            .build();
    tableUnpartitionedPartitions = ImmutableList.of(new HivePartition(tableUnpartitioned));
    tablePartitionFormatProperties = new ConnectorTableProperties(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                    fileFormatColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(createUnboundedVarcharType(), utf8Slice("textfile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rctext")),
                            Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                    dummyColumn, Domain.create(ValueSet.ofRanges(
                            Range.equal(INTEGER, 1L),
                            Range.equal(INTEGER, 2L),
                            Range.equal(INTEGER, 3L),
                            Range.equal(INTEGER, 4L)), false))),
            Optional.empty(),
            Optional.empty(),
            Optional.of(new DiscretePredicates(partitionColumns, ImmutableList.of(
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("textfile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 1L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("sequencefile"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 2L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rctext"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 3L)), false))),
                    TupleDomain.withColumnDomains(ImmutableMap.of(
                            dsColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))), false),
                            fileFormatColumn, Domain.create(ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("rcbinary"))), false),
                            dummyColumn, Domain.create(ValueSet.ofRanges(Range.equal(INTEGER, 4L)), false)))))),
            ImmutableList.of());
    tableUnpartitionedProperties = new ConnectorTableProperties();
}
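A side note on the Domain.create calls repeated throughout the fixture: the second argument is the nullAllowed flag. A minimal sketch of the two equivalent spellings, using the same dsColumn value as above:

// nullAllowed = false: NULL is excluded, since a Hive partition key here is never NULL.
Domain dsDomain = Domain.create(
        ValueSet.ofRanges(Range.equal(createUnboundedVarcharType(), utf8Slice("2012-12-29"))),
        false);
// Equivalent shorthand for a single non-null value.
Domain sameDomain = Domain.singleValue(createUnboundedVarcharType(), utf8Slice("2012-12-29"));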
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
The class CassandraSession, method getPartitions.
/**
* Get the list of partitions matching the given filters on partition keys.
*
* @param table the table to get partitions from
* @param filterPrefixes the list of possible values for each partition key.
* Order of values should match {@link CassandraTable#getPartitionKeyColumns()}
* @return list of {@link CassandraPartition}
*/
public List<CassandraPartition> getPartitions(CassandraTable table, List<Set<Object>> filterPrefixes)
{
    List<CassandraColumnHandle> partitionKeyColumns = table.getPartitionKeyColumns();
    if (filterPrefixes.size() != partitionKeyColumns.size()) {
        return ImmutableList.of(CassandraPartition.UNPARTITIONED);
    }

    Iterable<Row> rows;
    if (getCassandraVersion().compareTo(PARTITION_FETCH_WITH_IN_PREDICATE_VERSION) > 0) {
        log.debug("Using IN predicate to fetch partitions.");
        rows = queryPartitionKeysWithInClauses(table, filterPrefixes);
    }
    else {
        log.debug("Using combination of partition values to fetch partitions.");
        rows = queryPartitionKeysLegacyWithMultipleQueries(table, filterPrefixes);
    }

    if (rows == null) {
        // just split the whole partition range
        return ImmutableList.of(CassandraPartition.UNPARTITIONED);
    }

    ByteBuffer buffer = ByteBuffer.allocate(1000);
    HashMap<ColumnHandle, NullableValue> map = new HashMap<>();
    Set<String> uniquePartitionIds = new HashSet<>();
    StringBuilder stringBuilder = new StringBuilder();
    boolean isComposite = partitionKeyColumns.size() > 1;
    ImmutableList.Builder<CassandraPartition> partitions = ImmutableList.builder();
    for (Row row : rows) {
        buffer.clear();
        map.clear();
        stringBuilder.setLength(0);
        for (int i = 0; i < partitionKeyColumns.size(); i++) {
            ByteBuffer component = row.getBytesUnsafe(i);
            if (isComposite) {
                // build the composite key: a 2-byte length, the component bytes, and a trailing 0 byte per component
                short len = (short) component.limit();
                buffer.putShort(len);
                buffer.put(component);
                buffer.put((byte) 0);
            }
            else {
                buffer.put(component);
            }
            CassandraColumnHandle columnHandle = partitionKeyColumns.get(i);
            NullableValue keyPart = columnHandle.getCassandraType().getColumnValue(row, i);
            map.put(columnHandle, keyPart);
            if (i > 0) {
                stringBuilder.append(" AND ");
            }
            stringBuilder.append(CassandraCqlUtils.validColumnName(columnHandle.getName()));
            stringBuilder.append(" = ");
            stringBuilder.append(columnHandle.getCassandraType().getColumnValueForCql(row, i));
        }
        buffer.flip();
        byte[] key = new byte[buffer.limit()];
        buffer.get(key);
        TupleDomain<ColumnHandle> tupleDomain = TupleDomain.fromFixedValues(map);
        String partitionId = stringBuilder.toString();
        if (uniquePartitionIds.add(partitionId)) {
            partitions.add(new CassandraPartition(key, partitionId, tupleDomain, false));
        }
    }
    return partitions.build();
}
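A minimal usage sketch for a table with a two-column partition key (day, country). The session and table variables, the helper name, and the concrete values are hypothetical; the values are assumed to be in Trino's native representation for each column's type (Slice for varchar):

static List<CassandraPartition> partitionsForDay(CassandraSession session, CassandraTable table)
{
    Set<Object> dayValues = ImmutableSet.of(utf8Slice("2012-12-29"));
    Set<Object> countryValues = ImmutableSet.of(utf8Slice("US"), utf8Slice("DE"));
    // One candidate set per partition key column, in the order of
    // table.getPartitionKeyColumns(); if the sizes don't match, the method
    // falls back to CassandraPartition.UNPARTITIONED.
    List<Set<Object>> filterPrefixes = ImmutableList.of(dayValues, countryValues);
    return session.getPartitions(table, filterPrefixes);
}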
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
The class CassandraType, method buildTupleValue.
private Block buildTupleValue(GettableByIndexData row, int position)
{
    verify(this.kind == Kind.TUPLE, "Not a TUPLE type");
    TupleValue tupleValue = row.getTupleValue(position);
    RowBlockBuilder blockBuilder = (RowBlockBuilder) this.trinoType.createBlockBuilder(null, 1);
    SingleRowBlockWriter singleRowBlockWriter = blockBuilder.beginBlockEntry();
    int tuplePosition = 0;
    for (CassandraType argumentType : this.getArgumentTypes()) {
        int finalTuplePosition = tuplePosition;
        NullableValue value = argumentType.getColumnValue(tupleValue, tuplePosition, () -> tupleValue.getType().getComponentTypes().get(finalTuplePosition));
        writeNativeValue(argumentType.getTrinoType(), singleRowBlockWriter, value.getValue());
        tuplePosition++;
    }
    // can I just return singleRowBlockWriter here? It extends AbstractSingleRowBlock and tests pass.
    blockBuilder.closeEntry();
    return (Block) this.trinoType.getObject(blockBuilder, 0);
}
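Note the null handling in the loop above: NullableValue wraps either a non-null native value or a typed NULL, and writeNativeValue appends a null position when handed null, so no explicit null check is needed. A minimal sketch of the two states; the helper name and the blockBuilder parameter are hypothetical:

static void writeBoth(BlockBuilder blockBuilder)
{
    // Non-null: getValue() returns the native representation (a long for BIGINT).
    NullableValue present = NullableValue.of(BIGINT, 42L);
    // Typed NULL: isNull() is true and getValue() returns null.
    NullableValue absent = NullableValue.asNull(BIGINT);
    // writeNativeValue appends a null entry for the second call.
    writeNativeValue(BIGINT, blockBuilder, present.getValue());
    writeNativeValue(BIGINT, blockBuilder, absent.getValue());
}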