
Example 1 with Sink

use of org.openstreetmap.osmosis.core.task.v0_6.Sink in project voltdb by VoltDB.

In the class OSMImport, the run method:

public void run() {
    // Read the input as plain (uncompressed) OSM XML.
    CompressionMethod compressionMethod = CompressionMethod.None;
    // The sink receives every parsed entity and writes it to the configured VoltDB server.
    Sink sink = new VoltDBOsmSink(config.server);
    // Parse the input file with date parsing disabled and no compression.
    FastXmlReader fxr = new FastXmlReader(input, false, compressionMethod);
    fxr.setSink(sink);
    fxr.run();
}
Also used : Sink(org.openstreetmap.osmosis.core.task.v0_6.Sink) FastXmlReader(org.openstreetmap.osmosis.xml.v0_6.FastXmlReader) CompressionMethod(org.openstreetmap.osmosis.xml.common.CompressionMethod)
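For reference, this Sink is the Osmosis entity consumer interface, not Flink's sink2.Sink: an implementation receives every parsed node, way, and relation. Below is a minimal sketch of such a sink, with a hypothetical CountingSink standing in for VoltDBOsmSink (whose body is not shown on this page); it illustrates the four callbacks, not VoltDB's actual implementation.

import java.util.Map;
import org.openstreetmap.osmosis.core.container.v0_6.EntityContainer;
import org.openstreetmap.osmosis.core.task.v0_6.Sink;

public class CountingSink implements Sink {

    private long entityCount;

    @Override
    public void initialize(Map<String, Object> metaData) {
        // Called once before the first entity arrives; open resources here.
    }

    @Override
    public void process(EntityContainer entityContainer) {
        // Every node, way, and relation in the input passes through here.
        entityCount++;
    }

    @Override
    public void complete() {
        // Called after the last entity; flush any buffered work.
        System.out.format("Processed %d entities%n", entityCount);
    }

    @Override
    public void close() {
        // Release resources (connections, file handles).
    }
}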

Example 2 with Sink

use of org.apache.flink.api.connector.sink2.Sink in project flink by apache.

In the class UpsertKafkaDynamicTableFactoryTest, the testBufferedTableSink method:

@SuppressWarnings("rawtypes")
@Test
public void testBufferedTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, getModifiedOptions(getFullSinkOptions(), options -> {
        options.put("sink.buffer-flush.max-rows", "100");
        options.put("sink.buffer-flush.interval", "1s");
    }));
    final DynamicTableSink expectedSink = createExpectedSink(
            SINK_SCHEMA.toPhysicalRowDataType(),
            keyEncodingFormat,
            valueEncodingFormat,
            SINK_KEY_FIELDS,
            SINK_VALUE_FIELDS,
            null,
            SINK_TOPIC,
            UPSERT_KAFKA_SINK_PROPERTIES,
            DeliveryGuarantee.AT_LEAST_ONCE,
            new SinkBufferFlushMode(100, 1000L),
            null);
    // Test sink format.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);
    // Test kafka producer.
    DynamicTableSink.SinkRuntimeProvider provider = actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(DataStreamSinkProvider.class));
    final DataStreamSinkProvider sinkProvider = (DataStreamSinkProvider) provider;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    sinkProvider.consumeDataStream(n -> Optional.empty(), env.fromElements(new BinaryRowData(1)));
    final StreamOperatorFactory<?> sinkOperatorFactory = env.getStreamGraph().getStreamNodes().stream()
            .filter(n -> n.getOperatorName().contains("Writer"))
            .findFirst()
            .orElseThrow(() -> new RuntimeException("Expected operator with name Sink in stream graph."))
            .getOperatorFactory();
    assertThat(sinkOperatorFactory, instanceOf(SinkWriterOperatorFactory.class));
    org.apache.flink.api.connector.sink2.Sink sink = ((SinkWriterOperatorFactory) sinkOperatorFactory).getSink();
    assertThat(sink, instanceOf(ReducingUpsertSink.class));
}
Also used : DataType(org.apache.flink.table.types.DataType) AtomicDataType(org.apache.flink.table.types.AtomicDataType) Arrays(java.util.Arrays) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) SourceTransformation(org.apache.flink.streaming.api.transformations.SourceTransformation) DataStreamScanProvider(org.apache.flink.table.connector.source.DataStreamScanProvider) CoreMatchers.instanceOf(org.hamcrest.CoreMatchers.instanceOf) DecodingFormat(org.apache.flink.table.connector.format.DecodingFormat) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) FactoryMocks.createTableSink(org.apache.flink.table.factories.utils.FactoryMocks.createTableSink) ConfluentRegistryAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) FlinkMatchers.containsCause(org.apache.flink.core.testutils.FlinkMatchers.containsCause) AVRO_CONFLUENT(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil.AVRO_CONFLUENT) AvroRowDataSerializationSchema(org.apache.flink.formats.avro.AvroRowDataSerializationSchema) FactoryUtil(org.apache.flink.table.factories.FactoryUtil) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) ValidationException(org.apache.flink.table.api.ValidationException) Optional(java.util.Optional) ScanRuntimeProviderContext(org.apache.flink.table.runtime.connector.source.ScanRuntimeProviderContext) SerializationSchema(org.apache.flink.api.common.serialization.SerializationSchema) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) TestFormatFactory(org.apache.flink.table.factories.TestFormatFactory) DeliveryGuarantee(org.apache.flink.connector.base.DeliveryGuarantee) EncodingFormat(org.apache.flink.table.connector.format.EncodingFormat) Sink(org.apache.flink.api.connector.sink2.Sink) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) StreamOperatorFactory(org.apache.flink.streaming.api.operators.StreamOperatorFactory) Column(org.apache.flink.table.catalog.Column) HashMap(java.util.HashMap) RowType(org.apache.flink.table.types.logical.RowType) ScanTableSource(org.apache.flink.table.connector.source.ScanTableSource) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) KafkaSink(org.apache.flink.connector.kafka.sink.KafkaSink) RowDataToAvroConverters(org.apache.flink.formats.avro.RowDataToAvroConverters) FactoryMocks.createTableSource(org.apache.flink.table.factories.utils.FactoryMocks.createTableSource) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) SinkWriterOperatorFactory(org.apache.flink.streaming.runtime.operators.sink.SinkWriterOperatorFactory) ExpectedException(org.junit.rules.ExpectedException) RowData(org.apache.flink.table.data.RowData) Properties(java.util.Properties) Assert.assertTrue(org.junit.Assert.assertTrue) DataTypes(org.apache.flink.table.api.DataTypes) VarCharType(org.apache.flink.table.types.logical.VarCharType) Test(org.junit.Test) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) KafkaSourceEnumState(org.apache.flink.connector.kafka.source.enumerator.KafkaSourceEnumState) DeserializationSchema(org.apache.flink.api.common.serialization.DeserializationSchema) Consumer(java.util.function.Consumer) StartupMode(org.apache.flink.streaming.connectors.kafka.config.StartupMode) Rule(org.junit.Rule) KafkaSource(org.apache.flink.connector.kafka.source.KafkaSource) UniqueConstraint(org.apache.flink.table.catalog.UniqueConstraint) SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) FactoryMocks(org.apache.flink.table.factories.utils.FactoryMocks) KafkaPartitionSplit(org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit) Transformation(org.apache.flink.api.dag.Transformation) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) AvroSchemaConverter(org.apache.flink.formats.avro.typeutils.AvroSchemaConverter)
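The two sink.buffer-flush options are what switch the upsert sink into buffered mode. Below is a hedged sketch of how they map onto the SinkBufferFlushMode asserted above; the helper toFlushMode is illustrative, not the factory's actual code path, and it assumes Flink's TimeUtils for duration parsing.

import java.time.Duration;
import java.util.Map;
import org.apache.flink.util.TimeUtils;

static SinkBufferFlushMode toFlushMode(Map<String, String> options) {
    // "sink.buffer-flush.max-rows" = "100"  -> flush after 100 buffered rows
    int maxRows = Integer.parseInt(options.get("sink.buffer-flush.max-rows"));
    // "sink.buffer-flush.interval" = "1s"   -> flush every 1000 ms
    Duration interval = TimeUtils.parseDuration(options.get("sink.buffer-flush.interval"));
    // Matches the expected value in the test: new SinkBufferFlushMode(100, 1000L)
    return new SinkBufferFlushMode(maxRows, interval.toMillis());
}

Note that the test pairs buffering with DeliveryGuarantee.AT_LEAST_ONCE and expects the resulting sink to be a ReducingUpsertSink wrapping the underlying Kafka sink.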

Example 3 with Sink

use of org.openstreetmap.osmosis.core.task.v0_6.Sink in project bboxdb by jnidzwetzki.

In the class OSMDataConverter, the start method:

/**
 * Start the converter
 */
public void start() {
    try {
        // Open file handles
        for (final OSMType osmType : filter.keySet()) {
            final BufferedWriter bw = new BufferedWriter(new FileWriter(new File(output + File.separator + osmType.toString())));
            writerMap.put(osmType, bw);
        }
        System.out.format("Importing %s%n", filename);
        final OsmosisReader reader = new OsmosisReader(new FileInputStream(filename));
        reader.setSink(new Sink() {

            @Override
            public void close() {
            }

            @Override
            public void complete() {
            }

            @Override
            public void initialize(final Map<String, Object> metaData) {
            }

            @Override
            public void process(final EntityContainer entityContainer) {
                try {
                    if (entityContainer.getEntity() instanceof Node) {
                        // Nodes are cheap to handle, dispatching to another thread
                        // is more expensive
                        final Node node = (Node) entityContainer.getEntity();
                        handleNode(node);
                        statistics.incProcessedNodes();
                    } else if (entityContainer.getEntity() instanceof Way) {
                        // Ways are expensive to handle
                        final Way way = (Way) entityContainer.getEntity();
                        queue.put(way);
                        statistics.incProcessedWays();
                    }
                } catch (InterruptedException e) {
                    // Interrupted while blocking on queue.put(); stop processing.
                    return;
                }
            }
        });
        // Start the way consumers; each one drains the queue filled by process().
        for (int i = 0; i < CONSUMER_THREADS; i++) {
            threadPool.submit(new Consumer());
        }
        reader.run();
    } catch (IOException e) {
        logger.error("Got an exception during import", e);
    } finally {
        shutdown();
    }
}
Also used : FileWriter(java.io.FileWriter) WayNode(org.openstreetmap.osmosis.core.domain.v0_6.WayNode) SerializableNode(org.bboxdb.tools.converter.osm.util.SerializableNode) Node(org.openstreetmap.osmosis.core.domain.v0_6.Node) EntityContainer(org.openstreetmap.osmosis.core.container.v0_6.EntityContainer) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) Way(org.openstreetmap.osmosis.core.domain.v0_6.Way) BufferedWriter(java.io.BufferedWriter) Sink(org.openstreetmap.osmosis.core.task.v0_6.Sink) File(java.io.File) OsmosisReader(crosby.binary.osmosis.OsmosisReader)
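The Consumer submitted to the thread pool above is an inner worker class of OSMDataConverter that does not appear on this page (it is unrelated to the java.util.function.Consumer in the Aggregations list below). A minimal sketch of what such a worker could look like, assuming the shared queue is a BlockingQueue<Way> and a handleWay method does the expensive per-way work:

class Consumer implements Runnable {

    @Override
    public void run() {
        try {
            while (true) {
                // Blocks until process() above hands off another way.
                final Way way = queue.take();
                handleWay(way);
            }
        } catch (InterruptedException e) {
            // Shutdown path: restore the interrupt flag and let the worker exit.
            Thread.currentThread().interrupt();
        }
    }
}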

Aggregations

OsmosisReader (crosby.binary.osmosis.OsmosisReader) 1
BufferedWriter (java.io.BufferedWriter) 1
File (java.io.File) 1
FileInputStream (java.io.FileInputStream) 1
FileWriter (java.io.FileWriter) 1
IOException (java.io.IOException) 1
Arrays (java.util.Arrays) 1
Collections (java.util.Collections) 1
HashMap (java.util.HashMap) 1
Map (java.util.Map) 1
Optional (java.util.Optional) 1
Properties (java.util.Properties) 1
Consumer (java.util.function.Consumer) 1
DeserializationSchema (org.apache.flink.api.common.serialization.DeserializationSchema) 1
SerializationSchema (org.apache.flink.api.common.serialization.SerializationSchema) 1
Sink (org.apache.flink.api.connector.sink2.Sink) 1
Transformation (org.apache.flink.api.dag.Transformation) 1
DeliveryGuarantee (org.apache.flink.connector.base.DeliveryGuarantee) 1
KafkaSink (org.apache.flink.connector.kafka.sink.KafkaSink) 1
KafkaSource (org.apache.flink.connector.kafka.source.KafkaSource) 1