Use of io.trino.spi.type.VarbinaryType.VARBINARY in project trino by trinodb.
The class LogicalPlanner, method createTableWriterPlan.
private RelationPlan createTableWriterPlan(
        Analysis analysis,
        PlanNode source,
        List<Symbol> symbols,
        WriterTarget target,
        List<String> columnNames,
        List<ColumnMetadata> columnMetadataList,
        Optional<TableLayout> writeTableLayout,
        TableStatisticsMetadata statisticsMetadata)
{
Optional<PartitioningScheme> partitioningScheme = Optional.empty();
Optional<PartitioningScheme> preferredPartitioningScheme = Optional.empty();
if (writeTableLayout.isPresent()) {
List<Symbol> partitionFunctionArguments = new ArrayList<>();
writeTableLayout.get().getPartitionColumns().stream()
        .mapToInt(columnNames::indexOf)
        .mapToObj(symbols::get)
        .forEach(partitionFunctionArguments::add);
List<Symbol> outputLayout = new ArrayList<>(symbols);
Optional<PartitioningHandle> partitioningHandle = writeTableLayout.get().getPartitioning();
if (partitioningHandle.isPresent()) {
partitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(partitioningHandle.get(), partitionFunctionArguments), outputLayout));
} else {
// empty connector partitioning handle means evenly partitioning on partitioning columns
preferredPartitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments), outputLayout));
}
}
verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
        .collect(toImmutableMap(Entry::getKey, Entry::getValue));
Set<Symbol> notNullColumnSymbols = columnMetadataList.stream()
        .filter(column -> !column.isNullable())
        .map(ColumnMetadata::getName)
        .map(columnToSymbolMap::get)
        .collect(toImmutableSet());
if (!statisticsMetadata.isEmpty()) {
TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(symbolAllocator, plannerContext);
// partial aggregation is run within the TableWriteOperator to calculate the statistics for
// the data consumed by the TableWriteOperator
// final aggregation is run within the TableFinishOperator to summarize collected statistics
// by the partial aggregation from all of the writer nodes
StatisticAggregations partialAggregation = aggregations.getPartialAggregation();
TableFinishNode commitNode = new TableFinishNode(
        idAllocator.getNextId(),
        new TableWriterNode(
                idAllocator.getNextId(),
                source,
                target,
                symbolAllocator.newSymbol("partialrows", BIGINT),
                symbolAllocator.newSymbol("fragment", VARBINARY),
                symbols,
                columnNames,
                notNullColumnSymbols,
                partitioningScheme,
                preferredPartitioningScheme,
                Optional.of(partialAggregation),
                Optional.of(result.getDescriptor().map(aggregations.getMappings()::get))),
        target,
        symbolAllocator.newSymbol("rows", BIGINT),
        Optional.of(aggregations.getFinalAggregation()),
        Optional.of(result.getDescriptor()));
return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
}
TableFinishNode commitNode = new TableFinishNode(
        idAllocator.getNextId(),
        new TableWriterNode(
                idAllocator.getNextId(),
                source,
                target,
                symbolAllocator.newSymbol("partialrows", BIGINT),
                symbolAllocator.newSymbol("fragment", VARBINARY),
                symbols,
                columnNames,
                notNullColumnSymbols,
                partitioningScheme,
                preferredPartitioningScheme,
                Optional.empty(),
                Optional.empty()),
        target,
        symbolAllocator.newSymbol("rows", BIGINT),
        Optional.empty(),
        Optional.empty());
return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
}
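Both branches above allocate the same pair of writer outputs: a "partialrows" symbol typed BIGINT and a "fragment" symbol typed VARBINARY, the fragment being the opaque connector payload that the enclosing TableFinishNode consumes when it commits the write. A minimal sketch of that allocation in isolation (assuming the same symbolAllocator field as in the method above):

Symbol partialRows = symbolAllocator.newSymbol("partialrows", BIGINT);
// connector-specific commit payload, hence the opaque VARBINARY type
Symbol fragment = symbolAllocator.newSymbol("fragment", VARBINARY);
// final row count reported by the TableFinishNode
Symbol rows = symbolAllocator.newSymbol("rows", BIGINT);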
Use of io.trino.spi.type.VarbinaryType.VARBINARY in project trino by trinodb.
The class QueryPlanner, method plan.
public UpdateNode plan(Update node) {
Table table = node.getTable();
TableHandle handle = analysis.getTableHandle(table);
TableSchema tableSchema = plannerContext.getMetadata().getTableSchema(session, handle);
Map<String, ColumnHandle> columnMap = plannerContext.getMetadata().getColumnHandles(session, handle);
List<ColumnSchema> columnsSchemas = tableSchema.getColumns();
List<String> targetColumnNames = node.getAssignments().stream()
        .map(assignment -> assignment.getName().getValue())
        .collect(toImmutableList());
// Create lists of column names and SET expressions, in table column order
ImmutableList.Builder<String> updatedColumnNamesBuilder = ImmutableList.builder();
ImmutableList.Builder<ColumnHandle> updatedColumnHandlesBuilder = ImmutableList.builder();
ImmutableList.Builder<Expression> orderedColumnValuesBuilder = ImmutableList.builder();
for (ColumnSchema columnSchema : columnsSchemas) {
String name = columnSchema.getName();
int index = targetColumnNames.indexOf(name);
if (index >= 0) {
updatedColumnNamesBuilder.add(name);
updatedColumnHandlesBuilder.add(requireNonNull(columnMap.get(name), "columnMap didn't contain name"));
orderedColumnValuesBuilder.add(node.getAssignments().get(index).getValue());
}
}
List<String> updatedColumnNames = updatedColumnNamesBuilder.build();
List<ColumnHandle> updatedColumnHandles = updatedColumnHandlesBuilder.build();
List<Expression> orderedColumnValues = orderedColumnValuesBuilder.build();
// create table scan
RelationPlan relationPlan = new RelationPlanner(
        analysis, symbolAllocator, idAllocator, lambdaDeclarationToSymbolMap,
        plannerContext, outerContext, session, recursiveSubqueries)
        .process(table, null);
PlanBuilder builder = newPlanBuilder(relationPlan, analysis, lambdaDeclarationToSymbolMap);
if (node.getWhere().isPresent()) {
builder = filter(builder, node.getWhere().get(), node);
}
builder = subqueryPlanner.handleSubqueries(builder, orderedColumnValues, analysis.getSubqueries(node));
builder = builder.appendProjections(orderedColumnValues, symbolAllocator, idAllocator);
PlanAndMappings planAndMappings = coerce(builder, orderedColumnValues, analysis, idAllocator, symbolAllocator, typeCoercion);
builder = planAndMappings.getSubPlan();
ImmutableList.Builder<Symbol> updatedColumnValuesBuilder = ImmutableList.builder();
orderedColumnValues.forEach(columnValue -> updatedColumnValuesBuilder.add(planAndMappings.get(columnValue)));
Symbol rowId = builder.translate(analysis.getRowIdField(table));
updatedColumnValuesBuilder.add(rowId);
List<Symbol> outputs = ImmutableList.of(symbolAllocator.newSymbol("partialrows", BIGINT), symbolAllocator.newSymbol("fragment", VARBINARY));
Optional<PlanNodeId> tableScanId = getIdForLeftTableScan(relationPlan.getRoot());
checkArgument(tableScanId.isPresent(), "tableScanId not present");
// create update node
return new UpdateNode(
        idAllocator.getNextId(),
        builder.getRoot(),
        new UpdateTarget(
                Optional.empty(),
                plannerContext.getMetadata().getTableMetadata(session, handle).getTable(),
                updatedColumnNames,
                updatedColumnHandles),
        rowId,
        updatedColumnValuesBuilder.build(),
        outputs);
}
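As in the table writer plan, the UpdateNode exposes a BIGINT "partialrows" output and a VARBINARY "fragment" output. Note that the loop over columnsSchemas keeps the SET assignments in table column order rather than in the order they were written; a small standalone sketch of that reordering, with hypothetical column names:

// hypothetical: table declares (a, b, c), statement is UPDATE ... SET c = ..., a = ...
List<String> tableColumns = ImmutableList.of("a", "b", "c");
List<String> targetColumnNames = ImmutableList.of("c", "a");
ImmutableList.Builder<String> orderedNames = ImmutableList.builder();
for (String name : tableColumns) {
    if (targetColumnNames.contains(name)) {
        orderedNames.add(name);
    }
}
// orderedNames.build() is [a, c]: table order, not statement order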
Use of io.trino.spi.type.VarbinaryType.VARBINARY in project trino by trinodb.
The class TestCreateTableTask, method testCreateWithNotNullColumns.
@Test
public void testCreateWithNotNullColumns() {
metadata.setConnectorCapabilities(NOT_NULL_COLUMN_CONSTRAINT);
List<TableElement> inputColumns = ImmutableList.of(
        new ColumnDefinition(identifier("a"), toSqlType(DATE), true, emptyList(), Optional.empty()),
        new ColumnDefinition(identifier("b"), toSqlType(VARCHAR), false, emptyList(), Optional.empty()),
        new ColumnDefinition(identifier("c"), toSqlType(VARBINARY), false, emptyList(), Optional.empty()));
CreateTable statement = new CreateTable(QualifiedName.of("test_table"), inputColumns, true, ImmutableList.of(), Optional.empty());
CreateTableTask createTableTask = new CreateTableTask(plannerContext, new AllowAllAccessControl(), columnPropertyManager, tablePropertyManager);
getFutureValue(createTableTask.internalExecute(statement, testSession, emptyList(), output -> {
}));
assertEquals(metadata.getCreateTableCallCount(), 1);
List<ColumnMetadata> columns = metadata.getReceivedTableMetadata().get(0).getColumns();
assertEquals(columns.size(), 3);
assertEquals(columns.get(0).getName(), "a");
assertEquals(columns.get(0).getType().getDisplayName().toUpperCase(ENGLISH), "DATE");
assertTrue(columns.get(0).isNullable());
assertEquals(columns.get(1).getName(), "b");
assertEquals(columns.get(1).getType().getDisplayName().toUpperCase(ENGLISH), "VARCHAR");
assertFalse(columns.get(1).isNullable());
assertEquals(columns.get(2).getName(), "c");
assertEquals(columns.get(2).getType().getDisplayName().toUpperCase(ENGLISH), "VARBINARY");
assertFalse(columns.get(2).isNullable());
}
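The three ColumnDefinition entries roughly correspond to CREATE TABLE test_table (a DATE, b VARCHAR NOT NULL, c VARBINARY NOT NULL), with the boolean flag read as the column's nullability. A short sketch of deriving the non-nullable column names from the received metadata, mirroring the assertions above (columns is the list obtained from metadata.getReceivedTableMetadata()):

Set<String> notNullColumns = columns.stream()
        .filter(column -> !column.isNullable())
        .map(ColumnMetadata::getName)
        .collect(toImmutableSet());
// expected: [b, c]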
Use of io.trino.spi.type.VarbinaryType.VARBINARY in project trino by trinodb.
The class RcFileTester, method preprocessWriteValueOld.
private static Object preprocessWriteValueOld(Format format, Type type, Object value) {
if (value == null) {
return null;
}
if (type.equals(BOOLEAN)) {
return value;
}
if (type.equals(TINYINT)) {
return ((Number) value).byteValue();
}
if (type.equals(SMALLINT)) {
return ((Number) value).shortValue();
}
if (type.equals(INTEGER)) {
return ((Number) value).intValue();
}
if (type.equals(BIGINT)) {
return ((Number) value).longValue();
}
if (type.equals(REAL)) {
return ((Number) value).floatValue();
}
if (type.equals(DOUBLE)) {
return ((Number) value).doubleValue();
}
if (type instanceof VarcharType) {
return value;
}
if (type.equals(VARBINARY)) {
return ((SqlVarbinary) value).getBytes();
}
if (type.equals(DATE)) {
return Date.ofEpochDay(((SqlDate) value).getDays());
}
if (type.equals(TIMESTAMP_MILLIS)) {
long millis = ((SqlTimestamp) value).getMillis();
if (format == Format.BINARY) {
millis = HIVE_STORAGE_TIME_ZONE.convertLocalToUTC(millis, false);
}
return Timestamp.ofEpochMilli(millis);
}
if (type instanceof DecimalType) {
return HiveDecimal.create(((SqlDecimal) value).toBigDecimal());
}
if (type instanceof ArrayType) {
Type elementType = type.getTypeParameters().get(0);
return ((List<?>) value).stream().map(element -> preprocessWriteValueOld(format, elementType, element)).collect(toList());
}
if (type instanceof MapType) {
Type keyType = type.getTypeParameters().get(0);
Type valueType = type.getTypeParameters().get(1);
Map<Object, Object> newMap = new HashMap<>();
for (Entry<?, ?> entry : ((Map<?, ?>) value).entrySet()) {
newMap.put(preprocessWriteValueOld(format, keyType, entry.getKey()), preprocessWriteValueOld(format, valueType, entry.getValue()));
}
return newMap;
}
if (type instanceof RowType) {
List<?> fieldValues = (List<?>) value;
List<Type> fieldTypes = type.getTypeParameters();
List<Object> newStruct = new ArrayList<>();
for (int fieldId = 0; fieldId < fieldValues.size(); fieldId++) {
newStruct.add(preprocessWriteValueOld(format, fieldTypes.get(fieldId), fieldValues.get(fieldId)));
}
return newStruct;
}
throw new IllegalArgumentException("unsupported type: " + type);
}
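For the VARBINARY branch, the Trino-side value is a SqlVarbinary wrapper while the old Hive RCFile writer expects the raw bytes. A hedged usage sketch of that branch (the byte array is made up for illustration; the format argument does not affect this branch):

byte[] bytes = new byte[] {0x01, 0x02, 0x03};
Object hiveValue = preprocessWriteValueOld(Format.BINARY, VARBINARY, new SqlVarbinary(bytes));
// hiveValue is the underlying byte[] {0x01, 0x02, 0x03}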
Use of io.trino.spi.type.VarbinaryType.VARBINARY in project trino by trinodb.
The class OrcTester, method preprocessWriteValueHive.
private static Object preprocessWriteValueHive(Type type, Object value) {
if (value == null) {
return null;
}
if (type.equals(BOOLEAN)) {
return value;
}
if (type.equals(TINYINT)) {
return ((Number) value).byteValue();
}
if (type.equals(SMALLINT)) {
return ((Number) value).shortValue();
}
if (type.equals(INTEGER)) {
return ((Number) value).intValue();
}
if (type.equals(BIGINT)) {
return ((Number) value).longValue();
}
if (type.equals(REAL)) {
return ((Number) value).floatValue();
}
if (type.equals(DOUBLE)) {
return ((Number) value).doubleValue();
}
if (type instanceof VarcharType) {
return value;
}
if (type instanceof CharType) {
return new HiveChar((String) value, ((CharType) type).getLength());
}
if (type.equals(VARBINARY)) {
return ((SqlVarbinary) value).getBytes();
}
if (type.equals(DATE)) {
return Date.ofEpochDay(((SqlDate) value).getDays());
}
if (type.equals(TIMESTAMP_MILLIS) || type.equals(TIMESTAMP_MICROS) || type.equals(TIMESTAMP_NANOS)) {
LocalDateTime dateTime = ((SqlTimestamp) value).toLocalDateTime();
return Timestamp.ofEpochSecond(dateTime.toEpochSecond(ZoneOffset.UTC), dateTime.getNano());
}
if (type.equals(TIMESTAMP_TZ_MILLIS) || type.equals(TIMESTAMP_TZ_MICROS) || type.equals(TIMESTAMP_TZ_NANOS)) {
SqlTimestampWithTimeZone timestamp = (SqlTimestampWithTimeZone) value;
int nanosOfMilli = roundDiv(timestamp.getPicosOfMilli(), PICOSECONDS_PER_NANOSECOND);
return Timestamp.ofEpochMilli(timestamp.getEpochMillis(), nanosOfMilli);
}
if (type instanceof DecimalType) {
return HiveDecimal.create(((SqlDecimal) value).toBigDecimal());
}
if (type instanceof ArrayType) {
Type elementType = type.getTypeParameters().get(0);
return ((List<?>) value).stream().map(element -> preprocessWriteValueHive(elementType, element)).collect(toList());
}
if (type instanceof MapType) {
Type keyType = type.getTypeParameters().get(0);
Type valueType = type.getTypeParameters().get(1);
Map<Object, Object> newMap = new HashMap<>();
for (Entry<?, ?> entry : ((Map<?, ?>) value).entrySet()) {
newMap.put(preprocessWriteValueHive(keyType, entry.getKey()), preprocessWriteValueHive(valueType, entry.getValue()));
}
return newMap;
}
if (type instanceof RowType) {
List<?> fieldValues = (List<?>) value;
List<Type> fieldTypes = type.getTypeParameters();
List<Object> newStruct = new ArrayList<>();
for (int fieldId = 0; fieldId < fieldValues.size(); fieldId++) {
newStruct.add(preprocessWriteValueHive(fieldTypes.get(fieldId), fieldValues.get(fieldId)));
}
return newStruct;
}
throw new IllegalArgumentException("unsupported type: " + type);
}
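The method recurses element-wise through ARRAY, MAP and ROW types, so nested VARBINARY values are unwrapped one element at a time. A hedged sketch of calling it on an array of VARBINARY (the ArrayType construction and values are illustrative):

Type arrayOfVarbinary = new ArrayType(VARBINARY);
List<SqlVarbinary> trinoValue = ImmutableList.of(new SqlVarbinary(new byte[] {1}), new SqlVarbinary(new byte[] {2}));
Object hiveValue = preprocessWriteValueHive(arrayOfVarbinary, trinoValue);
// hiveValue is a List whose elements are the raw byte arrays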