Use of org.apache.flink.api.common.serialization.DeserializationSchema in project flink by apache.
The class KafkaConsumerTestBase, method runCollectingSchemaTest.
/**
* Test that ensures that DeserializationSchema can emit multiple records via a Collector.
*
* @throws Exception
*/
public void runCollectingSchemaTest() throws Exception {
    final int elementCount = 20;
    final String topic = writeSequence("testCollectingSchema", elementCount, 1, 1);

    // read using custom schema
    final StreamExecutionEnvironment env1 = StreamExecutionEnvironment.getExecutionEnvironment();
    env1.setParallelism(1);
    env1.getConfig().setRestartStrategy(RestartStrategies.noRestart());

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);

    DataStream<Tuple2<Integer, String>> fromKafka =
            env1.addSource(
                    kafkaServer
                            .getConsumer(topic, new CollectingDeserializationSchema(elementCount), props)
                            .assignTimestampsAndWatermarks(
                                    new AscendingTimestampExtractor<Tuple2<Integer, String>>() {
                                        @Override
                                        public long extractAscendingTimestamp(Tuple2<Integer, String> element) {
                                            String string = element.f1;
                                            return Long.parseLong(string.substring(0, string.length() - 1));
                                        }
                                    }));

    fromKafka.keyBy(t -> t.f0)
            .process(new KeyedProcessFunction<Integer, Tuple2<Integer, String>, Void>() {

                private boolean registered = false;

                @Override
                public void processElement(Tuple2<Integer, String> value, Context ctx, Collector<Void> out)
                        throws Exception {
                    if (!registered) {
                        ctx.timerService().registerEventTimeTimer(elementCount - 2);
                        registered = true;
                    }
                }

                @Override
                public void onTimer(long timestamp, OnTimerContext ctx, Collector<Void> out) throws Exception {
                    throw new SuccessException();
                }
            });

    tryExecute(env1, "Consume " + elementCount + " elements from Kafka");
    deleteTestTopic(topic);
}
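CollectingDeserializationSchema is defined elsewhere in the test base and is not shown in this excerpt. As a rough sketch of the Collector-based contract it exercises, a schema that turns a single Kafka record into several elements could look like the following; the class name, the comma-splitting logic, and the end-of-stream criterion are illustrative assumptions, not the actual test class.

import java.nio.charset.StandardCharsets;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

// Illustrative schema: one Kafka record may expand into several Tuple2 elements.
public class MultiRecordSchema implements DeserializationSchema<Tuple2<Integer, String>> {

    private final int expectedCount; // hypothetical end-of-stream criterion
    private int emitted;

    public MultiRecordSchema(int expectedCount) {
        this.expectedCount = expectedCount;
    }

    @Override
    public Tuple2<Integer, String> deserialize(byte[] message) {
        // Not used; the Collector-based overload below does the work.
        throw new UnsupportedOperationException("Use deserialize(byte[], Collector) instead.");
    }

    @Override
    public void deserialize(byte[] message, Collector<Tuple2<Integer, String>> out) {
        // Emit one element per comma-separated token (the splitting logic is an assumption).
        String payload = new String(message, StandardCharsets.UTF_8);
        for (String token : payload.split(",")) {
            out.collect(Tuple2.of(emitted++, token));
        }
    }

    @Override
    public boolean isEndOfStream(Tuple2<Integer, String> nextElement) {
        return emitted >= expectedCount;
    }

    @Override
    public TypeInformation<Tuple2<Integer, String>> getProducedType() {
        return TypeInformation.of(new TypeHint<Tuple2<Integer, String>>() {});
    }
}

The single-argument deserialize(byte[]) must still be implemented because it is the interface's abstract method; the Collector overload is a default method that connectors such as the Kafka consumer invoke when they support splitting one record into many.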
Use of org.apache.flink.api.common.serialization.DeserializationSchema in project flink by apache.
The class SocketDynamicTableFactory, method createDynamicTableSource.
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    // either implement your custom validation logic here ...
    // or use the provided helper utility
    final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);

    // discover a suitable decoding format
    final DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
            helper.discoverDecodingFormat(DeserializationFormatFactory.class, FactoryUtil.FORMAT);

    // validate all options
    helper.validate();

    // get the validated options
    final ReadableConfig options = helper.getOptions();
    final String hostname = options.get(HOSTNAME);
    final int port = options.get(PORT);
    final byte byteDelimiter = (byte) (int) options.get(BYTE_DELIMITER);

    // derive the produced data type (excluding computed columns) from the catalog table
    final DataType producedDataType = context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType();

    // create and return dynamic table source
    return new SocketDynamicTableSource(hostname, port, byteDelimiter, decodingFormat, producedDataType);
}
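The HOSTNAME, PORT, and BYTE_DELIMITER constants used above are ConfigOptions declared on the factory but not included in this excerpt. A plausible fragment of the factory class, following the Flink documentation's socket source walkthrough (the option keys and the '\n' default delimiter are assumptions, not taken from this excerpt):

import java.util.HashSet;
import java.util.Set;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.table.factories.FactoryUtil;

// Sketch: option declarations inside SocketDynamicTableFactory.
public static final ConfigOption<String> HOSTNAME =
        ConfigOptions.key("hostname").stringType().noDefaultValue();

public static final ConfigOption<Integer> PORT =
        ConfigOptions.key("port").intType().noDefaultValue();

public static final ConfigOption<Integer> BYTE_DELIMITER =
        ConfigOptions.key("byte-delimiter").intType().defaultValue(10); // 10 == '\n' (assumed default)

// The factory advertises its options so that the TableFactoryHelper can validate them:
@Override
public Set<ConfigOption<?>> requiredOptions() {
    final Set<ConfigOption<?>> options = new HashSet<>();
    options.add(HOSTNAME);
    options.add(PORT);
    options.add(FactoryUtil.FORMAT); // a decoding format must be configured
    return options;
}

@Override
public Set<ConfigOption<?>> optionalOptions() {
    final Set<ConfigOption<?>> options = new HashSet<>();
    options.add(BYTE_DELIMITER);
    return options;
}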
Use of org.apache.flink.api.common.serialization.DeserializationSchema in project flink by apache.
The class DebeziumJsonDecodingFormat, method createRuntimeDecoder.
@Override
public DeserializationSchema<RowData> createRuntimeDecoder(
        DynamicTableSource.Context context, DataType physicalDataType, int[][] projections) {
    physicalDataType = Projection.of(projections).project(physicalDataType);
    final List<ReadableMetadata> readableMetadata =
            metadataKeys.stream()
                    .map(k -> Stream.of(ReadableMetadata.values())
                            .filter(rm -> rm.key.equals(k))
                            .findFirst()
                            .orElseThrow(IllegalStateException::new))
                    .collect(Collectors.toList());
    final List<DataTypes.Field> metadataFields =
            readableMetadata.stream().map(m -> DataTypes.FIELD(m.key, m.dataType)).collect(Collectors.toList());
    final DataType producedDataType = DataTypeUtils.appendRowFields(physicalDataType, metadataFields);
    final TypeInformation<RowData> producedTypeInfo = context.createTypeInformation(producedDataType);
    return new DebeziumJsonDeserializationSchema(
            physicalDataType, readableMetadata, producedTypeInfo, schemaInclude, ignoreParseErrors, timestampFormat);
}
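The metadataKeys consumed above are supplied beforehand through the format's metadata hooks (listReadableMetadata / applyReadableMetadata), normally driven by the Kafka table source rather than user code. A minimal sketch of how those hooks and createRuntimeDecoder fit together; the physical schema and the selected metadata keys are illustrative assumptions:

import java.util.Arrays;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;

// Sketch: how the planner-facing hooks of a metadata-aware DecodingFormat fit together.
public final class DecodingFormatPlanningSketch {

    static DeserializationSchema<RowData> configure(
            DecodingFormat<DeserializationSchema<RowData>> format, DynamicTableSource.Context context) {

        // 1. Ask the format which metadata columns it can expose.
        format.listReadableMetadata().forEach((key, type) -> System.out.println(key + " -> " + type));

        // 2. Push down the metadata keys that the query actually selects (illustrative selection).
        format.applyReadableMetadata(Arrays.asList("ingestion-timestamp", "source.timestamp"));

        // 3. Create the runtime decoder for the physical row type (schema is illustrative).
        DataType physicalDataType =
                DataTypes.ROW(
                        DataTypes.FIELD("id", DataTypes.BIGINT()),
                        DataTypes.FIELD("name", DataTypes.STRING()));
        return format.createRuntimeDecoder(context, physicalDataType);
    }
}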
Use of org.apache.flink.api.common.serialization.DeserializationSchema in project flink by apache.
The class DeserializationSchemaAdapter, method createDeserialization.
private DeserializationSchema<RowData> createDeserialization() throws IOException {
    try {
        DeserializationSchema<RowData> deserialization = InstantiationUtil.clone(deserializationSchema);
        deserialization.open(
                new DeserializationSchema.InitializationContext() {
                    @Override
                    public MetricGroup getMetricGroup() {
                        throw new UnsupportedOperationException("MetricGroup is unsupported in BulkFormat.");
                    }

                    @Override
                    public UserCodeClassLoader getUserCodeClassLoader() {
                        return (UserCodeClassLoader) Thread.currentThread().getContextClassLoader();
                    }
                });
        return deserialization;
    } catch (Exception e) {
        throw new IOException(e);
    }
}
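The open(...) call matters because a DeserializationSchema may defer its setup to that hook and rely on the InitializationContext it receives. A minimal sketch of such a schema, where the metric name and the pass-through string decoding are illustrative assumptions:

import java.nio.charset.StandardCharsets;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.metrics.Counter;

// Sketch: a schema that performs its setup in open() and uses the provided metric group.
public class CountingStringSchema implements DeserializationSchema<String> {

    private transient Counter recordsDeserialized;

    @Override
    public void open(InitializationContext context) {
        // Register a counter on the metric group handed in by the runtime (metric name is illustrative).
        recordsDeserialized = context.getMetricGroup().counter("recordsDeserialized");
    }

    @Override
    public String deserialize(byte[] message) {
        recordsDeserialized.inc();
        return new String(message, StandardCharsets.UTF_8);
    }

    @Override
    public boolean isEndOfStream(String nextElement) {
        return false;
    }

    @Override
    public TypeInformation<String> getProducedType() {
        return Types.STRING;
    }
}

Note that a schema registering metrics in open() would not work with the adapter above, whose InitializationContext deliberately rejects getMetricGroup().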
Use of org.apache.flink.api.common.serialization.DeserializationSchema in project flink by apache.
The class KinesisDynamicTableFactory, method createDynamicTableSource.
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    ReadableConfig tableOptions = helper.getOptions();
    ResolvedCatalogTable catalogTable = context.getCatalogTable();
    DataType physicalDataType = catalogTable.getResolvedSchema().toPhysicalRowDataType();
    KinesisConnectorOptionsUtil optionsUtils =
            new KinesisConnectorOptionsUtil(catalogTable.getOptions(), tableOptions);

    // initialize the table format early in order to register its consumedOptionKeys
    // in the TableFactoryHelper, as those are needed for correct option validation
    DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
            helper.discoverDecodingFormat(DeserializationFormatFactory.class, FORMAT);

    // validate the data types of the table options
    helper.validateExcept(optionsUtils.getNonValidatedPrefixes().toArray(new String[0]));
    Properties properties = optionsUtils.getValidatedSourceConfigurations();

    return new KinesisDynamicSource(physicalDataType, tableOptions.get(STREAM), properties, decodingFormat);
}
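This factory is exercised through SQL DDL rather than direct instantiation: the 'connector' option selects the factory and the 'format' option selects the DeserializationFormatFactory that produces the DeserializationSchema. A sketch of such a table definition; the option keys follow the documented Kinesis table connector, but the stream name, region, and schema are placeholders:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

// Sketch: a table whose 'connector' option makes the planner look up the Kinesis factory.
public final class KinesisTableExample {

    public static void main(String[] args) {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        tableEnv.executeSql(
                "CREATE TABLE orders (\n"
                        + "  order_id STRING,\n"
                        + "  price DOUBLE\n"
                        + ") WITH (\n"
                        + "  'connector' = 'kinesis',\n"
                        + "  'stream' = 'orders-stream',\n" // placeholder stream name
                        + "  'aws.region' = 'us-east-1',\n" // placeholder region
                        + "  'format' = 'json'\n"           // chooses the JSON deserialization format
                        + ")");

        tableEnv.executeSql("SELECT * FROM orders").print();
    }
}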