Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
Class LocalExecutionPlanner, method plan:
public LocalExecutionPlan plan(
        TaskContext taskContext,
        PlanNode plan,
        TypeProvider types,
        PartitioningScheme partitioningScheme,
        StageExecutionDescriptor stageExecutionDescriptor,
        List<PlanNodeId> partitionedSourceOrder,
        OutputBuffer outputBuffer)
{
    List<Symbol> outputLayout = partitioningScheme.getOutputLayout();

    if (partitioningScheme.getPartitioning().getHandle().equals(FIXED_BROADCAST_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(FIXED_ARBITRARY_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(SCALED_WRITER_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(SINGLE_DISTRIBUTION) ||
            partitioningScheme.getPartitioning().getHandle().equals(COORDINATOR_DISTRIBUTION)) {
        return plan(taskContext, stageExecutionDescriptor, plan, outputLayout, types, partitionedSourceOrder, new TaskOutputFactory(outputBuffer));
    }

    // We can convert the symbols directly into channels, because the root must be a sink and therefore the layout is fixed
    List<Integer> partitionChannels;
    List<Optional<NullableValue>> partitionConstants;
    List<Type> partitionChannelTypes;
    if (partitioningScheme.getHashColumn().isPresent()) {
        partitionChannels = ImmutableList.of(outputLayout.indexOf(partitioningScheme.getHashColumn().get()));
        partitionConstants = ImmutableList.of(Optional.empty());
        partitionChannelTypes = ImmutableList.of(BIGINT);
    }
    else {
        partitionChannels = partitioningScheme.getPartitioning().getArguments().stream()
                .map(argument -> {
                    if (argument.isConstant()) {
                        return -1;
                    }
                    return outputLayout.indexOf(argument.getColumn());
                })
                .collect(toImmutableList());
        partitionConstants = partitioningScheme.getPartitioning().getArguments().stream()
                .map(argument -> {
                    if (argument.isConstant()) {
                        return Optional.of(argument.getConstant());
                    }
                    return Optional.<NullableValue>empty();
                })
                .collect(toImmutableList());
        partitionChannelTypes = partitioningScheme.getPartitioning().getArguments().stream()
                .map(argument -> {
                    if (argument.isConstant()) {
                        return argument.getConstant().getType();
                    }
                    return types.get(argument.getColumn());
                })
                .collect(toImmutableList());
    }

    PartitionFunction partitionFunction = nodePartitioningManager.getPartitionFunction(taskContext.getSession(), partitioningScheme, partitionChannelTypes);
    OptionalInt nullChannel = OptionalInt.empty();
    Set<Symbol> partitioningColumns = partitioningScheme.getPartitioning().getColumns();

    // partitioningColumns expected to have one column in the normal case, and zero columns when partitioning on a constant
    checkArgument(!partitioningScheme.isReplicateNullsAndAny() || partitioningColumns.size() <= 1);
    if (partitioningScheme.isReplicateNullsAndAny() && partitioningColumns.size() == 1) {
        nullChannel = OptionalInt.of(outputLayout.indexOf(getOnlyElement(partitioningColumns)));
    }

    return plan(
            taskContext,
            stageExecutionDescriptor,
            plan,
            outputLayout,
            types,
            partitionedSourceOrder,
            new PartitionedOutputFactory(
                    partitionFunction,
                    partitionChannels,
                    partitionConstants,
                    partitioningScheme.isReplicateNullsAndAny(),
                    nullChannel,
                    outputBuffer,
                    maxPagePartitioningBufferSize,
                    positionsAppenderFactory));
}
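The constants here are NullableValues, which bundle a value with its Type; that is why a constant argument can supply its channel type directly via argument.getConstant().getType() while real columns go through the TypeProvider, and why constants get the placeholder channel -1. Below is a minimal, self-contained sketch of that three-way split; PartitionArgument is a hypothetical stand-in for Trino's internal Partitioning.ArgumentBinding, and the layout, names, and types are invented.

import io.trino.spi.predicate.NullableValue;
import io.trino.spi.type.Type;

import java.util.List;
import java.util.Optional;

import static io.trino.spi.type.BigintType.BIGINT;

public class PartitionArgumentSplit
{
    // Hypothetical stand-in for Trino's internal Partitioning.ArgumentBinding:
    // an argument is either a constant NullableValue or a reference to an output column
    record PartitionArgument(Optional<NullableValue> constant, Optional<String> column)
    {
        boolean isConstant()
        {
            return constant.isPresent();
        }
    }

    public static void main(String[] args)
    {
        List<String> outputLayout = List.of("orderkey", "custkey");
        List<PartitionArgument> arguments = List.of(
                new PartitionArgument(Optional.empty(), Optional.of("custkey")),
                new PartitionArgument(Optional.of(NullableValue.of(BIGINT, 42L)), Optional.empty()));

        // Constants get a placeholder channel of -1; columns resolve to their position in the layout
        List<Integer> partitionChannels = arguments.stream()
                .map(argument -> argument.isConstant() ? -1 : outputLayout.indexOf(argument.column().orElseThrow()))
                .toList();

        // The constant slot is Optional.empty() for real columns, so the two lists line up by index
        List<Optional<NullableValue>> partitionConstants = arguments.stream()
                .map(PartitionArgument::constant)
                .toList();

        // A constant NullableValue carries its own Type, so no symbol-to-type lookup is needed for it
        List<Type> partitionChannelTypes = arguments.stream()
                .map(argument -> argument.isConstant()
                        ? argument.constant().orElseThrow().getType()
                        : BIGINT) // stands in for types.get(column) in the planner
                .toList();

        System.out.println(partitionChannels); // [1, -1]
        System.out.println(partitionConstants);
        System.out.println(partitionChannelTypes); // both bigint in this example
    }
}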
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
Class HivePartitionManager, method partitionMatches:
public static boolean partitionMatches(List<HiveColumnHandle> partitionColumns, TupleDomain<ColumnHandle> constraintSummary, HivePartition partition)
{
    if (constraintSummary.isNone()) {
        return false;
    }
    Map<ColumnHandle, Domain> domains = constraintSummary.getDomains().get();
    for (HiveColumnHandle column : partitionColumns) {
        NullableValue value = partition.getKeys().get(column);
        Domain allowedDomain = domains.get(column);
        if (allowedDomain != null && !allowedDomain.includesNullableValue(value.getValue())) {
            return false;
        }
    }
    return true;
}
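Each partition key in partition.getKeys() is a NullableValue, so Hive's __HIVE_DEFAULT_PARTITION__ marker arrives as a typed NULL and can be checked with Domain.includesNullableValue, which accepts a possibly-null raw value. A minimal sketch with invented varchar values:

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.NullableValue;

import static io.airlift.slice.Slices.utf8Slice;
import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;

public class PartitionKeyFilter
{
    public static void main(String[] args)
    {
        // A partition key is modeled as a NullableValue; the default partition becomes a typed NULL
        NullableValue ds = NullableValue.of(createUnboundedVarcharType(), utf8Slice("2024-01-01"));
        NullableValue defaultPartition = NullableValue.asNull(createUnboundedVarcharType());

        // A constraint that allows exactly one value and no NULLs
        Domain allowed = Domain.singleValue(createUnboundedVarcharType(), utf8Slice("2024-01-01"));

        // includesNullableValue takes the raw (possibly null) value, which is why the Hive code
        // above passes value.getValue() rather than the NullableValue itself
        System.out.println(allowed.includesNullableValue(ds.getValue())); // true
        System.out.println(allowed.includesNullableValue(defaultPartition.getValue())); // false: NULL not allowed
    }
}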
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
Class JmxSplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle table, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter)
{
    JmxTableHandle tableHandle = (JmxTableHandle) table;

    // TODO is there a better way to get the node column?
    Optional<JmxColumnHandle> nodeColumnHandle = tableHandle.getColumnHandles().stream()
            .filter(jmxColumnHandle -> jmxColumnHandle.getColumnName().equals(NODE_COLUMN_NAME))
            .findFirst();
    checkState(nodeColumnHandle.isPresent(), "Failed to find %s column", NODE_COLUMN_NAME);

    TupleDomain<ColumnHandle> nodeFilter = tableHandle.getNodeFilter();
    List<ConnectorSplit> splits = nodeManager.getAllNodes().stream()
            .filter(node -> {
                NullableValue value = NullableValue.of(createUnboundedVarcharType(), utf8Slice(node.getNodeIdentifier()));
                return nodeFilter.overlaps(fromFixedValues(ImmutableMap.of(nodeColumnHandle.get(), value)));
            })
            .map(node -> new JmxSplit(ImmutableList.of(node.getHostAndPort())))
            .collect(toList());
    return new FixedSplitSource(splits);
}
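The pruning trick is to wrap each candidate node's identifier in a single-entry TupleDomain via fromFixedValues and test it against the pushed-down node filter with overlaps. A minimal sketch of the same check, using a plain String column key instead of a JmxColumnHandle and an invented node filter:

import com.google.common.collect.ImmutableMap;
import io.trino.spi.predicate.NullableValue;
import io.trino.spi.predicate.TupleDomain;

import java.util.List;

import static io.airlift.slice.Slices.utf8Slice;
import static io.trino.spi.predicate.TupleDomain.fromFixedValues;
import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;

public class NodeFilterSketch
{
    public static void main(String[] args)
    {
        // Hypothetical column key; the real connector uses the JmxColumnHandle of the "node" column
        String nodeColumn = "node";

        // Stands in for tableHandle.getNodeFilter(), e.g. from a predicate node = 'worker-1'
        TupleDomain<String> nodeFilter = fromFixedValues(
                ImmutableMap.of(nodeColumn, NullableValue.of(createUnboundedVarcharType(), utf8Slice("worker-1"))));

        for (String nodeId : List.of("worker-1", "worker-2")) {
            NullableValue value = NullableValue.of(createUnboundedVarcharType(), utf8Slice(nodeId));
            // overlaps() is true only when the candidate node's single-value domain intersects the filter,
            // so only worker-1 would get a split
            boolean matches = nodeFilter.overlaps(fromFixedValues(ImmutableMap.of(nodeColumn, value)));
            System.out.println(nodeId + " -> " + matches);
        }
    }
}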
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
Class TestRaptorStorageManager, method testReader:
@Test
public void testReader()
        throws Exception
{
    RaptorStorageManager manager = createRaptorStorageManager();

    List<Long> columnIds = ImmutableList.of(2L, 4L, 6L, 7L, 8L, 9L);
    List<Type> columnTypes = ImmutableList.of(BIGINT, createVarcharType(10), VARBINARY, DATE, BOOLEAN, DOUBLE);

    byte[] bytes1 = octets(0x00, 0xFE, 0xFF);
    byte[] bytes3 = octets(0x01, 0x02, 0x19, 0x80);

    StoragePageSink sink = createStoragePageSink(manager, columnIds, columnTypes);

    Object[][] doubles = {
            {881L, "-inf", null, null, null, Double.NEGATIVE_INFINITY},
            {882L, "+inf", null, null, null, Double.POSITIVE_INFINITY},
            {883L, "nan", null, null, null, Double.NaN},
            {884L, "min", null, null, null, Double.MIN_VALUE},
            {885L, "max", null, null, null, Double.MAX_VALUE},
            {886L, "pzero", null, null, null, 0.0},
            {887L, "nzero", null, null, null, -0.0},
    };

    List<Page> pages = rowPagesBuilder(columnTypes)
            .row(123L, "hello", wrappedBuffer(bytes1), sqlDate(2001, 8, 22).getDays(), true, 123.45)
            .row(null, null, null, null, null, null)
            .row(456L, "bye", wrappedBuffer(bytes3), sqlDate(2005, 4, 22).getDays(), false, 987.65)
            .rows(doubles)
            .build();
    sink.appendPages(pages);
    List<ShardInfo> shards = getFutureValue(sink.commit());

    assertEquals(shards.size(), 1);
    UUID uuid = Iterables.getOnlyElement(shards).getShardUuid();

    MaterializedResult expected = resultBuilder(SESSION, columnTypes)
            .row(123L, "hello", sqlBinary(bytes1), sqlDate(2001, 8, 22), true, 123.45)
            .row(null, null, null, null, null, null)
            .row(456L, "bye", sqlBinary(bytes3), sqlDate(2005, 4, 22), false, 987.65)
            .rows(doubles)
            .build();

    // no tuple domain (all)
    TupleDomain<RaptorColumnHandle> tupleDomain = TupleDomain.all();
    try (ConnectorPageSource pageSource = getPageSource(manager, columnIds, columnTypes, uuid, tupleDomain)) {
        MaterializedResult result = materializeSourceDataStream(SESSION, pageSource, columnTypes);
        assertEquals(result.getRowCount(), expected.getRowCount());
        assertEquals(result, expected);
    }

    // tuple domain within the column range
    tupleDomain = TupleDomain.fromFixedValues(ImmutableMap.<RaptorColumnHandle, NullableValue>builder()
            .put(new RaptorColumnHandle("c1", 2, BIGINT), NullableValue.of(BIGINT, 124L))
            .buildOrThrow());
    try (ConnectorPageSource pageSource = getPageSource(manager, columnIds, columnTypes, uuid, tupleDomain)) {
        MaterializedResult result = materializeSourceDataStream(SESSION, pageSource, columnTypes);
        assertEquals(result.getRowCount(), expected.getRowCount());
    }

    // tuple domain outside the column range
    tupleDomain = TupleDomain.fromFixedValues(ImmutableMap.<RaptorColumnHandle, NullableValue>builder()
            .put(new RaptorColumnHandle("c1", 2, BIGINT), NullableValue.of(BIGINT, 122L))
            .buildOrThrow());
    try (ConnectorPageSource pageSource = getPageSource(manager, columnIds, columnTypes, uuid, tupleDomain)) {
        MaterializedResult result = materializeSourceDataStream(SESSION, pageSource, columnTypes);
        assertEquals(result.getRowCount(), 0);
    }
}
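The three predicates differ only in whether the fixed value can exist in the shard at all: TupleDomain.all() reads everything, 124 falls between the shard's minimum (123) and maximum (887) for column c1 so the shard cannot be pruned and every row comes back, and 122 falls below the minimum so the whole shard is skipped. A sketch of the equivalent Domain arithmetic, using a plain String column key instead of RaptorColumnHandle and a hand-written [123, 887] range standing in for the shard's column statistics:

import com.google.common.collect.ImmutableMap;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.NullableValue;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.predicate.ValueSet;

import static io.trino.spi.type.BigintType.BIGINT;

public class FixedValuePushdownSketch
{
    public static void main(String[] args)
    {
        // Hypothetical String key; the test above keys the map by RaptorColumnHandle("c1", 2, BIGINT)
        String column = "c1";

        TupleDomain<String> inRange = TupleDomain.fromFixedValues(
                ImmutableMap.of(column, NullableValue.of(BIGINT, 124L)));
        TupleDomain<String> outOfRange = TupleDomain.fromFixedValues(
                ImmutableMap.of(column, NullableValue.of(BIGINT, 122L)));

        // fromFixedValues is shorthand for a single-value Domain per column
        Domain single124 = Domain.singleValue(BIGINT, 124L);
        System.out.println(inRange.getDomains().orElseThrow().get(column).equals(single124)); // true

        // Stand-in for the shard's min/max statistics on c1 implied by the inserted rows (123 .. 887)
        Domain shardStats = Domain.create(
                ValueSet.ofRanges(Range.range(BIGINT, 123L, true, 887L, true)),
                true); // the shard also contains a NULL row

        // 124 overlaps the shard statistics, so the shard cannot be skipped;
        // 122 does not, so the reader returns zero rows
        System.out.println(shardStats.overlaps(single124)); // true
        System.out.println(shardStats.overlaps(outOfRange.getDomains().orElseThrow().get(column))); // false
    }
}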
Use of io.trino.spi.predicate.NullableValue in project trino by trinodb.
Class TableStatsSystemTable, method buildPages:
private static List<Page> buildPages(MetadataDao dao, TupleDomain<Integer> tupleDomain)
{
    Map<Integer, NullableValue> domainValues = extractFixedValues(tupleDomain).orElse(ImmutableMap.of());
    String schemaName = getStringValue(domainValues.get(getColumnIndex(METADATA, SCHEMA_NAME)));
    String tableName = getStringValue(domainValues.get(getColumnIndex(METADATA, TABLE_NAME)));

    PageListBuilder pageBuilder = new PageListBuilder(METADATA.getColumns().stream()
            .map(ColumnMetadata::getType)
            .collect(toList()));

    for (TableStatsRow row : dao.getTableStatsRows(schemaName, tableName)) {
        pageBuilder.beginRow();
        VARCHAR.writeSlice(pageBuilder.nextBlockBuilder(), utf8Slice(row.getSchemaName()));
        VARCHAR.writeSlice(pageBuilder.nextBlockBuilder(), utf8Slice(row.getTableName()));
        TIMESTAMP_MILLIS.writeLong(pageBuilder.nextBlockBuilder(), row.getCreateTime() * MICROSECONDS_PER_MILLISECOND);
        TIMESTAMP_MILLIS.writeLong(pageBuilder.nextBlockBuilder(), row.getUpdateTime() * MICROSECONDS_PER_MILLISECOND);
        BIGINT.writeLong(pageBuilder.nextBlockBuilder(), row.getTableVersion());
        BIGINT.writeLong(pageBuilder.nextBlockBuilder(), row.getShardCount());
        BIGINT.writeLong(pageBuilder.nextBlockBuilder(), row.getRowCount());
        BIGINT.writeLong(pageBuilder.nextBlockBuilder(), row.getCompressedSize());
        BIGINT.writeLong(pageBuilder.nextBlockBuilder(), row.getUncompressedSize());
    }
    return pageBuilder.build();
}
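extractFixedValues is the inverse of fromFixedValues: it returns, per column, a NullableValue for every domain pinned to a single value (or NULL), and is Optional.empty() only when the constraint is NONE, which is what the orElse(ImmutableMap.of()) fallback covers. A minimal sketch with integer column ordinals; getStringValue below mirrors the assumed shape of the private helper used in buildPages, not its actual source:

import com.google.common.collect.ImmutableMap;
import io.airlift.slice.Slice;
import io.trino.spi.predicate.NullableValue;
import io.trino.spi.predicate.TupleDomain;

import java.util.Map;

import static io.airlift.slice.Slices.utf8Slice;
import static io.trino.spi.predicate.TupleDomain.extractFixedValues;
import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;

public class SystemTableFilterSketch
{
    public static void main(String[] args)
    {
        // Columns keyed by ordinal, as in the system table above: assume 0 = schema_name, 1 = table_name
        TupleDomain<Integer> constraint = TupleDomain.fromFixedValues(
                ImmutableMap.of(0, NullableValue.of(createUnboundedVarcharType(), utf8Slice("tpch"))));

        // Keeps only the columns pinned to a single value (or NULL); empty only for a NONE constraint
        Map<Integer, NullableValue> fixed = extractFixedValues(constraint).orElse(ImmutableMap.of());

        String schemaName = getStringValue(fixed.get(0)); // "tpch"
        String tableName = getStringValue(fixed.get(1)); // null: no filter on table_name
        System.out.println(schemaName + " / " + tableName);
    }

    // Assumed shape of the private getStringValue helper referenced by buildPages above
    private static String getStringValue(NullableValue value)
    {
        if (value == null || value.isNull()) {
            return null;
        }
        return ((Slice) value.getValue()).toStringUtf8();
    }
}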