use of org.apache.flink.table.api.TableException in project flink by apache.
the class HiveFunctionDefinitionFactory method createFunctionDefinitionFromHiveFunction.
/**
* Create a FunctionDefinition from a Hive function's class name. Called directly by {@link
* org.apache.flink.table.module.hive.HiveModule}.
*/
public FunctionDefinition createFunctionDefinitionFromHiveFunction(
        String name, String functionClassName) {
    Class<?> clazz;
    try {
        clazz = Thread.currentThread().getContextClassLoader().loadClass(functionClassName);
        LOG.info("Successfully loaded Hive udf '{}' with class '{}'", name, functionClassName);
    } catch (ClassNotFoundException e) {
        throw new TableException(
                String.format("Failed to initiate an instance of class %s.", functionClassName), e);
    }

    if (UDF.class.isAssignableFrom(clazz)) {
        LOG.info("Transforming Hive function '{}' into a HiveSimpleUDF", name);
        return new HiveSimpleUDF(new HiveFunctionWrapper<>(functionClassName), hiveShim);
    } else if (GenericUDF.class.isAssignableFrom(clazz)) {
        LOG.info("Transforming Hive function '{}' into a HiveGenericUDF", name);
        return new HiveGenericUDF(new HiveFunctionWrapper<>(functionClassName), hiveShim);
    } else if (GenericUDTF.class.isAssignableFrom(clazz)) {
        LOG.info("Transforming Hive function '{}' into a HiveGenericUDTF", name);
        HiveGenericUDTF udtf = new HiveGenericUDTF(new HiveFunctionWrapper<>(functionClassName), hiveShim);
        return new TableFunctionDefinition(name, udtf, GenericTypeInfo.of(Row.class));
    } else if (GenericUDAFResolver2.class.isAssignableFrom(clazz) || UDAF.class.isAssignableFrom(clazz)) {
        HiveGenericUDAF udaf;
        if (GenericUDAFResolver2.class.isAssignableFrom(clazz)) {
            LOG.info("Transforming Hive function '{}' into a HiveGenericUDAF without UDAF bridging", name);
            udaf = new HiveGenericUDAF(new HiveFunctionWrapper<>(functionClassName), false, hiveShim);
        } else {
            LOG.info("Transforming Hive function '{}' into a HiveGenericUDAF with UDAF bridging", name);
            udaf = new HiveGenericUDAF(new HiveFunctionWrapper<>(functionClassName), true, hiveShim);
        }
        return new AggregateFunctionDefinition(
                name,
                udaf,
                GenericTypeInfo.of(Object.class),
                GenericTypeInfo.of(GenericUDAFEvaluator.AggregationBuffer.class));
    } else {
        throw new IllegalArgumentException(
                String.format(
                        "HiveFunctionDefinitionFactory cannot initiate FunctionDefinition for class %s",
                        functionClassName));
    }
}
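The factory boils down to loading the class through the thread's context classloader and probing it with isAssignableFrom until a supported interface matches, wrapping ClassNotFoundException in a descriptive unchecked exception. Below is a minimal, self-contained sketch of that reflective-dispatch pattern; ScalarMarker, TableMarker, classify and UpperCaseFunction are hypothetical stand-ins for the Hive UDF interfaces, the factory method and a user class, not Flink API.

import java.util.function.Function;

public class ReflectiveDispatchSketch {

    // Hypothetical stand-ins for the Hive UDF interfaces probed in the factory above.
    interface ScalarMarker {}
    interface TableMarker {}

    static String classify(String name, String className) {
        Class<?> clazz;
        try {
            // Same loading strategy as the factory: use the thread's context classloader.
            clazz = Thread.currentThread().getContextClassLoader().loadClass(className);
        } catch (ClassNotFoundException e) {
            // Wrap the checked exception in a descriptive unchecked one,
            // as the factory does with TableException.
            throw new RuntimeException(
                    String.format("Failed to load class %s for function %s.", className, name), e);
        }
        // Probe the loaded class against the supported interfaces.
        if (ScalarMarker.class.isAssignableFrom(clazz)) {
            return "scalar";
        } else if (TableMarker.class.isAssignableFrom(clazz)) {
            return "table";
        }
        throw new IllegalArgumentException(
                String.format("Cannot create a function definition for class %s", className));
    }

    // Example user class that the scalar branch picks up.
    public static class UpperCaseFunction implements ScalarMarker, Function<String, String> {
        @Override
        public String apply(String s) {
            return s.toUpperCase();
        }
    }

    public static void main(String[] args) {
        // Prints "scalar".
        System.out.println(classify("my_upper", UpperCaseFunction.class.getName()));
    }
}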
use of org.apache.flink.table.api.TableException in project flink by apache.
the class IndexGeneratorTest method testDynamicIndexFromSystemTime.
@Test
public void testDynamicIndexFromSystemTime() {
    List<String> supportedUseCases =
            Arrays.asList("now()", "NOW()", "now( )", "NOW(\t)", "\t NOW( ) \t",
                    "current_timestamp", "CURRENT_TIMESTAMP", "\tcurrent_timestamp\t", " current_timestamp ");
    supportedUseCases.forEach(f -> {
        DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy_MM_dd");
        IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator(
                String.format("my-index-{%s|yyyy_MM_dd}", f), fieldNames, dataTypes);
        indexGenerator.open();
        // The date may change during the running of the unit test.
        // Generate the expected index name based on the current time
        // before and after calling the generate method.
        String expectedIndex1 = "my-index-" + LocalDateTime.now().format(dateTimeFormatter);
        String actualIndex = indexGenerator.generate(rows.get(1));
        String expectedIndex2 = "my-index-" + LocalDateTime.now().format(dateTimeFormatter);
        Assertions.assertTrue(actualIndex.equals(expectedIndex1) || actualIndex.equals(expectedIndex2));
    });

    List<String> invalidUseCases =
            Arrays.asList("now", "now(", "NOW", "NOW)", "current_timestamp()", "CURRENT_TIMESTAMP()", "CURRENT_timestamp");
    invalidUseCases.forEach(f -> {
        String expectedExceptionMsg = String.format(
                "Unknown field '%s' in index pattern 'my-index-{%s|yyyy_MM_dd}', please check the field name.",
                f, f);
        try {
            IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator(
                    String.format("my-index-{%s|yyyy_MM_dd}", f), fieldNames, dataTypes);
            indexGenerator.open();
        } catch (TableException e) {
            Assertions.assertEquals(expectedExceptionMsg, e.getMessage());
        }
    });
}
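The supported-cases loop uses a small trick for time-dependent assertions: the expected index name is computed both before and after the call under test, so the check still passes if the date rolls over in between. A minimal, self-contained sketch of that technique follows; generateIndex is a hypothetical stand-in for IndexGenerator#generate.

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class RolloverTolerantCheckSketch {

    // Hypothetical stand-in for IndexGenerator#generate: formats the prefix plus today's date.
    static String generateIndex(String prefix, DateTimeFormatter formatter) {
        return prefix + LocalDateTime.now().format(formatter);
    }

    public static void main(String[] args) {
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy_MM_dd");

        // Snapshot the expected value before and after the call under test. If midnight
        // passes in between, the actual value must still match one of the two snapshots.
        String expectedBefore = "my-index-" + LocalDateTime.now().format(formatter);
        String actual = generateIndex("my-index-", formatter);
        String expectedAfter = "my-index-" + LocalDateTime.now().format(formatter);

        if (!(actual.equals(expectedBefore) || actual.equals(expectedAfter))) {
            throw new AssertionError("Unexpected index name: " + actual);
        }
        System.out.println("Index name matched: " + actual);
    }
}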
use of org.apache.flink.table.api.TableException in project flink by apache.
the class FileSystemTableSink method createStreamingSink.
private DataStreamSink<?> createStreamingSink(
        ProviderContext providerContext, DataStream<RowData> dataStream, Context sinkContext, final int parallelism) {
    FileSystemFactory fsFactory = FileSystem::get;
    RowDataPartitionComputer computer = partitionComputer();
    boolean autoCompaction = tableOptions.getBoolean(AUTO_COMPACTION);
    Object writer = createWriter(sinkContext);
    boolean isEncoder = writer instanceof Encoder;

    TableBucketAssigner assigner = new TableBucketAssigner(computer);
    TableRollingPolicy rollingPolicy = new TableRollingPolicy(
            !isEncoder || autoCompaction,
            tableOptions.get(SINK_ROLLING_POLICY_FILE_SIZE).getBytes(),
            tableOptions.get(SINK_ROLLING_POLICY_ROLLOVER_INTERVAL).toMillis(),
            tableOptions.get(SINK_ROLLING_POLICY_INACTIVITY_INTERVAL).toMillis());

    String randomPrefix = "part-" + UUID.randomUUID().toString();
    OutputFileConfig.OutputFileConfigBuilder fileNamingBuilder = OutputFileConfig.builder();
    fileNamingBuilder = autoCompaction
            ? fileNamingBuilder.withPartPrefix(convertToUncompacted(randomPrefix))
            : fileNamingBuilder.withPartPrefix(randomPrefix);
    OutputFileConfig fileNamingConfig = fileNamingBuilder.build();

    BucketsBuilder<RowData, String, ? extends BucketsBuilder<RowData, ?, ?>> bucketsBuilder;
    if (isEncoder) {
        // noinspection unchecked
        bucketsBuilder = StreamingFileSink
                .forRowFormat(path, new ProjectionEncoder((Encoder<RowData>) writer, computer))
                .withBucketAssigner(assigner)
                .withOutputFileConfig(fileNamingConfig)
                .withRollingPolicy(rollingPolicy);
    } else {
        // noinspection unchecked
        bucketsBuilder = StreamingFileSink
                .forBulkFormat(path, new ProjectionBulkFactory((BulkWriter.Factory<RowData>) writer, computer))
                .withBucketAssigner(assigner)
                .withOutputFileConfig(fileNamingConfig)
                .withRollingPolicy(rollingPolicy);
    }

    long bucketCheckInterval = tableOptions.get(SINK_ROLLING_POLICY_CHECK_INTERVAL).toMillis();
    DataStream<PartitionCommitInfo> writerStream;
    if (autoCompaction) {
        long compactionSize = tableOptions.getOptional(COMPACTION_FILE_SIZE)
                .orElse(tableOptions.get(SINK_ROLLING_POLICY_FILE_SIZE))
                .getBytes();
        CompactReader.Factory<RowData> reader = createCompactReaderFactory(sinkContext)
                .orElseThrow(() -> new TableException(
                        "Please implement available reader for compaction: BulkFormat, FileInputFormat."));
        writerStream = StreamingSink.compactionWriter(
                providerContext, dataStream, bucketCheckInterval, bucketsBuilder,
                fsFactory, path, reader, compactionSize, parallelism);
    } else {
        writerStream = StreamingSink.writer(
                providerContext, dataStream, bucketCheckInterval, bucketsBuilder,
                parallelism, partitionKeys, tableOptions);
    }
    return StreamingSink.sink(
            providerContext, writerStream, path, tableIdentifier, partitionKeys,
            new EmptyMetaStoreFactory(path), fsFactory, tableOptions);
}
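Two small patterns in the compaction branch are worth calling out: the compaction target size falls back to the rolling-policy file size via Optional#orElse, and a missing compact reader is turned into a descriptive exception via Optional#orElseThrow. A minimal, self-contained sketch of both follows, with plain types standing in for Flink's table options and TableException; all names here are hypothetical.

import java.util.Optional;

public class CompactionConfigSketch {

    // Hypothetical stand-ins for tableOptions lookups and the compact-reader factory.
    static Optional<Long> compactionFileSize() {
        return Optional.empty(); // not configured in this example
    }

    static long rollingPolicyFileSize() {
        return 128L * 1024 * 1024; // 128 MB
    }

    static Optional<String> compactReaderFactory() {
        return Optional.of("bulk-format-reader");
    }

    public static void main(String[] args) {
        // Fall back to the rolling-policy file size when no explicit compaction size is set,
        // mirroring tableOptions.getOptional(COMPACTION_FILE_SIZE).orElse(...).
        long compactionSize = compactionFileSize().orElse(rollingPolicyFileSize());

        // Fail with a descriptive message when no reader is available for compaction,
        // mirroring the orElseThrow(() -> new TableException(...)) call above.
        String reader = compactReaderFactory().orElseThrow(
                () -> new IllegalStateException(
                        "Please implement available reader for compaction: BulkFormat, FileInputFormat."));

        System.out.println("compactionSize=" + compactionSize + ", reader=" + reader);
    }
}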
use of org.apache.flink.table.api.TableException in project flink by apache.
the class KafkaConnectorOptionsUtil method createValueFormatProjection.
/**
* Creates an array of indices that determine which physical fields of the table schema to
* include in the value format.
*
* <p>See {@link KafkaConnectorOptions#VALUE_FORMAT}, {@link
* KafkaConnectorOptions#VALUE_FIELDS_INCLUDE}, and {@link
* KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
*/
public static int[] createValueFormatProjection(
        ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final int physicalFieldCount = LogicalTypeChecks.getFieldCount(physicalType);
    final IntStream physicalFields = IntStream.range(0, physicalFieldCount);

    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final ValueFieldsStrategy strategy = options.get(VALUE_FIELDS_INCLUDE);
    if (strategy == ValueFieldsStrategy.ALL) {
        if (keyPrefix.length() > 0) {
            throw new ValidationException(
                    String.format(
                            "A key prefix is not allowed when option '%s' is set to '%s'. "
                                    + "Set it to '%s' instead to avoid field overlaps.",
                            VALUE_FIELDS_INCLUDE.key(),
                            ValueFieldsStrategy.ALL,
                            ValueFieldsStrategy.EXCEPT_KEY));
        }
        return physicalFields.toArray();
    } else if (strategy == ValueFieldsStrategy.EXCEPT_KEY) {
        final int[] keyProjection = createKeyFormatProjection(options, physicalDataType);
        return physicalFields
                .filter(pos -> IntStream.of(keyProjection).noneMatch(k -> k == pos))
                .toArray();
    }
    throw new TableException("Unknown value fields strategy:" + strategy);
}
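The EXCEPT_KEY branch simply keeps every physical field position that does not appear in the key projection. A self-contained sketch of that set difference over IntStream is below; the method and the example values are illustrative, not Flink API.

import java.util.Arrays;
import java.util.stream.IntStream;

public class ValueProjectionSketch {

    // Compute the value-format projection: every physical field position that is
    // not already claimed by the key projection (the EXCEPT_KEY branch above).
    static int[] valueProjection(int physicalFieldCount, int[] keyProjection) {
        return IntStream.range(0, physicalFieldCount)
                .filter(pos -> IntStream.of(keyProjection).noneMatch(k -> k == pos))
                .toArray();
    }

    public static void main(String[] args) {
        // A schema with 5 physical fields where fields 0 and 3 feed the key format.
        int[] keyProjection = {0, 3};
        int[] projection = valueProjection(5, keyProjection);
        System.out.println(Arrays.toString(projection)); // [1, 2, 4]
    }
}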
use of org.apache.flink.table.api.TableException in project flink by apache.
the class JsonToRowDataConverters method convertToTimestamp.
private TimestampData convertToTimestamp(JsonNode jsonNode) {
    TemporalAccessor parsedTimestamp;
    switch (timestampFormat) {
        case SQL:
            parsedTimestamp = SQL_TIMESTAMP_FORMAT.parse(jsonNode.asText());
            break;
        case ISO_8601:
            parsedTimestamp = ISO8601_TIMESTAMP_FORMAT.parse(jsonNode.asText());
            break;
        default:
            throw new TableException(
                    String.format(
                            "Unsupported timestamp format '%s'. Validator should have checked that.",
                            timestampFormat));
    }
    LocalTime localTime = parsedTimestamp.query(TemporalQueries.localTime());
    LocalDate localDate = parsedTimestamp.query(TemporalQueries.localDate());
    return TimestampData.fromLocalDateTime(LocalDateTime.of(localDate, localTime));
}
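The converter parses the text with a format-specific DateTimeFormatter and then recombines the date and time parts through TemporalQueries. A runnable sketch of the same idea follows; the two patterns here are simplified assumptions, since Flink's SQL_TIMESTAMP_FORMAT and ISO8601_TIMESTAMP_FORMAT also accept optional fractional seconds.

import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.time.temporal.TemporalQueries;

public class TimestampParsingSketch {

    enum TimestampFormat { SQL, ISO_8601 }

    // Assumed patterns; the real Flink formatters are richer than these.
    private static final DateTimeFormatter SQL_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
    private static final DateTimeFormatter ISO8601_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss");

    static LocalDateTime parse(String text, TimestampFormat format) {
        TemporalAccessor parsed;
        switch (format) {
            case SQL:
                parsed = SQL_FORMAT.parse(text);
                break;
            case ISO_8601:
                parsed = ISO8601_FORMAT.parse(text);
                break;
            default:
                throw new IllegalArgumentException("Unsupported timestamp format: " + format);
        }
        // Recombine the parsed date and time parts, as the converter does before
        // calling TimestampData.fromLocalDateTime.
        LocalDate date = parsed.query(TemporalQueries.localDate());
        LocalTime time = parsed.query(TemporalQueries.localTime());
        return LocalDateTime.of(date, time);
    }

    public static void main(String[] args) {
        System.out.println(parse("2023-07-14 10:15:30", TimestampFormat.SQL));
        System.out.println(parse("2023-07-14T10:15:30", TimestampFormat.ISO_8601));
    }
}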