Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class HiveParserUtils, method getGenericUDAFEvaluator.
// Returns the GenericUDAFEvaluator for the aggregation. This is called once for each GroupBy
// aggregation.
// TODO: Requiring a GenericUDAFEvaluator means we only support hive UDAFs. Need to avoid this
// to support flink UDAFs.
public static GenericUDAFEvaluator getGenericUDAFEvaluator(
        String aggName,
        ArrayList<ExprNodeDesc> aggParameters,
        HiveParserASTNode aggTree,
        boolean isDistinct,
        boolean isAllColumns,
        SqlOperatorTable opTable)
        throws SemanticException {
    ArrayList<ObjectInspector> originalParameterTypeInfos = getWritableObjectInspector(aggParameters);
    GenericUDAFEvaluator result =
            FunctionRegistry.getGenericUDAFEvaluator(aggName, originalParameterTypeInfos, isDistinct, isAllColumns);
    if (result == null) {
        // this happens for temp functions
        SqlOperator sqlOperator = getSqlOperator(aggName, opTable, SqlFunctionCategory.USER_DEFINED_FUNCTION);
        if (sqlOperator instanceof HiveAggSqlFunction) {
            HiveGenericUDAF hiveGenericUDAF =
                    (HiveGenericUDAF) ((HiveAggSqlFunction) sqlOperator).makeFunction(new Object[0], new LogicalType[0]);
            result = hiveGenericUDAF.createEvaluator(originalParameterTypeInfos.toArray(new ObjectInspector[0]));
        }
    }
    if (null == result) {
        String reason =
                "Looking for UDAF Evaluator \"" + aggName + "\" with parameters " + originalParameterTypeInfos;
        throw new SemanticException(
                HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_FUNCTION_SIGNATURE, aggTree.getChild(0), reason));
    }
    return result;
}
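As a rough usage sketch (not taken from the Flink sources), the first lookup path above can be exercised directly against Hive's FunctionRegistry; the "count" aggregation and the long-typed parameter are illustrative choices.
// Hedged sketch: resolve the built-in "count" evaluator for a single BIGINT argument,
// mirroring the FunctionRegistry call made in getGenericUDAFEvaluator.
ArrayList<ObjectInspector> parameterOIs = new ArrayList<>();
parameterOIs.add(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
GenericUDAFEvaluator evaluator =
        FunctionRegistry.getGenericUDAFEvaluator("count", parameterOIs, false, false);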
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class HiveTypeUtil, method toHiveTypeInfo.
/**
* Convert Flink DataType to Hive TypeInfo. For types with a precision parameter, e.g.
* timestamp, the supported precisions in Hive and Flink can be different. Therefore the
* conversion will fail for those types if the precision is not supported by Hive and
* checkPrecision is true.
*
* @param dataType a Flink DataType
* @param checkPrecision whether to fail the conversion if the precision of the DataType is not
* supported by Hive
* @return the corresponding Hive data type
*/
public static TypeInfo toHiveTypeInfo(DataType dataType, boolean checkPrecision) {
    checkNotNull(dataType, "type cannot be null");
    LogicalType logicalType = dataType.getLogicalType();
    return logicalType.accept(new TypeInfoLogicalTypeVisitor(dataType, checkPrecision));
}
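A minimal usage sketch (illustrative, not from the Flink sources), assuming the standard DataTypes factory from the Table API:
// Convert a Flink DECIMAL(10, 2) to the corresponding Hive TypeInfo.
DataType flinkType = DataTypes.DECIMAL(10, 2);
TypeInfo hiveType = HiveTypeUtil.toHiveTypeInfo(flinkType, true);
// With checkPrecision = true the visitor rejects precisions Hive cannot represent
// (e.g. for timestamp types); passing false skips that validation.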
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class ArrowUtils, method createArrowFieldWriterForArray.
private static ArrowFieldWriter<ArrayData> createArrowFieldWriterForArray(
        ValueVector vector, LogicalType fieldType) {
    if (vector instanceof TinyIntVector) {
        return TinyIntWriter.forArray((TinyIntVector) vector);
    } else if (vector instanceof SmallIntVector) {
        return SmallIntWriter.forArray((SmallIntVector) vector);
    } else if (vector instanceof IntVector) {
        return IntWriter.forArray((IntVector) vector);
    } else if (vector instanceof BigIntVector) {
        return BigIntWriter.forArray((BigIntVector) vector);
    } else if (vector instanceof BitVector) {
        return BooleanWriter.forArray((BitVector) vector);
    } else if (vector instanceof Float4Vector) {
        return FloatWriter.forArray((Float4Vector) vector);
    } else if (vector instanceof Float8Vector) {
        return DoubleWriter.forArray((Float8Vector) vector);
    } else if (vector instanceof VarCharVector) {
        return VarCharWriter.forArray((VarCharVector) vector);
    } else if (vector instanceof VarBinaryVector) {
        return VarBinaryWriter.forArray((VarBinaryVector) vector);
    } else if (vector instanceof DecimalVector) {
        DecimalVector decimalVector = (DecimalVector) vector;
        return DecimalWriter.forArray(
                decimalVector, getPrecision(decimalVector), decimalVector.getScale());
    } else if (vector instanceof TimeSecVector
            || vector instanceof TimeMilliVector
            || vector instanceof TimeMicroVector
            || vector instanceof TimeNanoVector) {
        return TimeWriter.forArray(vector);
    } else if (vector instanceof DateDayVector) {
        return DateWriter.forArray((DateDayVector) vector);
    } else if (vector instanceof TimeStampVector
            && ((ArrowType.Timestamp) vector.getField().getType()).getTimezone() == null) {
        int precision;
        if (fieldType instanceof LocalZonedTimestampType) {
            precision = ((LocalZonedTimestampType) fieldType).getPrecision();
        } else {
            precision = ((TimestampType) fieldType).getPrecision();
        }
        return TimestampWriter.forArray(vector, precision);
    } else if (vector instanceof ListVector) {
        ListVector listVector = (ListVector) vector;
        LogicalType elementType = ((ArrayType) fieldType).getElementType();
        return ArrayWriter.forArray(
                listVector,
                createArrowFieldWriterForArray(listVector.getDataVector(), elementType));
    } else if (vector instanceof StructVector) {
        RowType rowType = (RowType) fieldType;
        ArrowFieldWriter<RowData>[] fieldsWriters = new ArrowFieldWriter[rowType.getFieldCount()];
        for (int i = 0; i < fieldsWriters.length; i++) {
            fieldsWriters[i] =
                    createArrowFieldWriterForRow(
                            ((StructVector) vector).getVectorById(i), rowType.getTypeAt(i));
        }
        return RowWriter.forArray((StructVector) vector, fieldsWriters);
    } else {
        throw new UnsupportedOperationException(String.format("Unsupported type %s.", fieldType));
    }
}
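As an illustrative sketch (assumed setup, not from the Flink sources), the same vector-to-writer mapping can be exercised for a single TINYINT element vector; the allocator and vector name are arbitrary.
// Build a TINYINT element vector with Arrow's root allocator and obtain the
// array-position writer that the dispatch above would select for it.
BufferAllocator allocator = new RootAllocator();
TinyIntVector elementVector = new TinyIntVector("element", allocator);
ArrowFieldWriter<ArrayData> writer = TinyIntWriter.forArray(elementVector);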
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class ElasticsearchDynamicSinkFactoryBase, method getPrimaryKeyLogicalTypesWithIndex.
List<LogicalTypeWithIndex> getPrimaryKeyLogicalTypesWithIndex(Context context) {
    DataType physicalRowDataType = context.getPhysicalRowDataType();
    int[] primaryKeyIndexes = context.getPrimaryKeyIndexes();
    if (primaryKeyIndexes.length != 0) {
        DataType pkDataType = Projection.of(primaryKeyIndexes).project(physicalRowDataType);
        ElasticsearchValidationUtils.validatePrimaryKey(pkDataType);
    }
    ResolvedSchema resolvedSchema = context.getCatalogTable().getResolvedSchema();
    return Arrays.stream(primaryKeyIndexes)
            .mapToObj(
                    index -> {
                        Optional<Column> column = resolvedSchema.getColumn(index);
                        if (!column.isPresent()) {
                            throw new IllegalStateException(
                                    String.format("No primary key column found with index '%s'.", index));
                        }
                        LogicalType logicalType = column.get().getDataType().getLogicalType();
                        return new LogicalTypeWithIndex(index, logicalType);
                    })
            .collect(Collectors.toList());
}
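A small illustrative sketch (field names are made up) of the projection step used above to isolate the primary key columns before validation:
// Project the physical row type down to the primary key fields (index 0 here).
DataType physicalRowDataType =
        DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.STRING().notNull()),
                DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)),
                DataTypes.FIELD("payload", DataTypes.STRING()));
int[] primaryKeyIndexes = {0};
DataType pkDataType = Projection.of(primaryKeyIndexes).project(physicalRowDataType);
// pkDataType is now ROW<`id` STRING NOT NULL>, the shape passed to
// ElasticsearchValidationUtils.validatePrimaryKey above.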
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class OrcNoHiveColumnarRowInputFormat, method createPartitionedFormat.
/**
 * Create a partitioned {@link OrcColumnarRowInputFormat} in which the partition columns can be
 * generated from the split.
 */
public static <SplitT extends FileSourceSplit> OrcColumnarRowInputFormat<VectorizedRowBatch, SplitT> createPartitionedFormat(
        Configuration hadoopConfig,
        RowType tableType,
        List<String> partitionKeys,
        PartitionFieldExtractor<SplitT> extractor,
        int[] selectedFields,
        List<OrcFilters.Predicate> conjunctPredicates,
        int batchSize,
        Function<RowType, TypeInformation<RowData>> rowTypeInfoFactory) {
    // TODO FLINK-25113 all this partition keys code should be pruned from the orc format,
    // because now FileSystemTableSource uses FileInfoExtractorBulkFormat for reading partition
    // keys.
    String[] tableFieldNames = tableType.getFieldNames().toArray(new String[0]);
    LogicalType[] tableFieldTypes = tableType.getChildren().toArray(new LogicalType[0]);
    List<String> orcFieldNames = getNonPartNames(tableFieldNames, partitionKeys);
    int[] orcSelectedFields = getSelectedOrcFields(tableFieldNames, selectedFields, orcFieldNames);
    ColumnBatchFactory<VectorizedRowBatch, SplitT> batchGenerator =
            (SplitT split, VectorizedRowBatch rowBatch) -> {
                // create and initialize the row batch
                ColumnVector[] vectors = new ColumnVector[selectedFields.length];
                for (int i = 0; i < vectors.length; i++) {
                    String name = tableFieldNames[selectedFields[i]];
                    LogicalType type = tableFieldTypes[selectedFields[i]];
                    vectors[i] = partitionKeys.contains(name)
                            ? createFlinkVectorFromConstant(type, extractor.extract(split, name, type), batchSize)
                            : createFlinkVector(rowBatch.cols[orcFieldNames.indexOf(name)]);
                }
                return new VectorizedColumnBatch(vectors);
            };
    return new OrcColumnarRowInputFormat<>(
            new OrcNoHiveShim(),
            hadoopConfig,
            convertToOrcTypeWithPart(tableFieldNames, tableFieldTypes, partitionKeys),
            orcSelectedFields,
            conjunctPredicates,
            batchSize,
            batchGenerator,
            rowTypeInfoFactory.apply(new RowType(Arrays.stream(selectedFields)
                    .mapToObj(i -> tableType.getFields().get(i)).collect(Collectors.toList()))));
}
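A hypothetical invocation sketch (illustrative names and values, not from the Flink sources); the lambda stands in for a real PartitionFieldExtractor, and using InternalTypeInfo::of as the row-type-info factory is an assumption.
// Table schema: id INT, name STRING, dt STRING, with "dt" as the partition column.
RowType tableType = RowType.of(
        new LogicalType[] {new IntType(), VarCharType.STRING_TYPE, VarCharType.STRING_TYPE},
        new String[] {"id", "name", "dt"});
OrcColumnarRowInputFormat<VectorizedRowBatch, FileSourceSplit> format =
        OrcNoHiveColumnarRowInputFormat.createPartitionedFormat(
                new Configuration(),
                tableType,
                Collections.singletonList("dt"),
                (split, fieldName, fieldType) -> "2024-01-01", // stand-in partition value extractor
                new int[] {0, 2},
                Collections.emptyList(),
                2048,
                InternalTypeInfo::of);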