Use of org.apache.flink.api.connector.sink2.Sink in project voltdb by VoltDB.
The class OSMImport, method run.
public void run() {
    // The input is uncompressed OSM XML
    CompressionMethod compressionMethod = CompressionMethod.None;
    // VoltDBOsmSink receives the parsed OSM entities and writes them to the given server
    Sink sink = new VoltDBOsmSink(config.server);
    // Parse the input with Osmosis' FastXmlReader (date parsing disabled) and feed the sink
    FastXmlReader fxr = new FastXmlReader(input, false, compressionMethod);
    fxr.setSink(sink);
    fxr.run();
}
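Note that the sink wired in here is the Osmosis org.openstreetmap.osmosis.core.task.v0_6.Sink, which VoltDBOsmSink implements, rather than Flink's sink2.Sink. For reference, a minimal sketch of a custom Osmosis sink that only counts entities; CountingSink is illustrative and not part of the voltdb project:

import java.util.Map;

import org.openstreetmap.osmosis.core.container.v0_6.EntityContainer;
import org.openstreetmap.osmosis.core.task.v0_6.Sink;

// Minimal sketch: an Osmosis Sink that counts the entities it receives
public class CountingSink implements Sink {

    private long count;

    @Override
    public void initialize(final Map<String, Object> metaData) {
        // Called once before processing starts; nothing to set up here
    }

    @Override
    public void process(final EntityContainer entityContainer) {
        // Called once per OSM entity (node, way, relation)
        count++;
    }

    @Override
    public void complete() {
        // Called after the last entity; flush any buffered state
        System.out.format("Processed %d entities%n", count);
    }

    @Override
    public void close() {
        // Release resources
    }
}

Such a sink could be wired in the same way as above: fxr.setSink(new CountingSink());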
Use of org.apache.flink.api.connector.sink2.Sink in project flink by apache.
The class UpsertKafkaDynamicTableFactoryTest, method testBufferedTableSink.
@SuppressWarnings("rawtypes")
@Test
public void testBufferedTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink = createTableSink(
            SINK_SCHEMA,
            getModifiedOptions(getFullSinkOptions(), options -> {
                options.put("sink.buffer-flush.max-rows", "100");
                options.put("sink.buffer-flush.interval", "1s");
            }));
    final DynamicTableSink expectedSink = createExpectedSink(
            SINK_SCHEMA.toPhysicalRowDataType(), keyEncodingFormat, valueEncodingFormat,
            SINK_KEY_FIELDS, SINK_VALUE_FIELDS, null, SINK_TOPIC,
            UPSERT_KAFKA_SINK_PROPERTIES, DeliveryGuarantee.AT_LEAST_ONCE,
            new SinkBufferFlushMode(100, 1000L), null);
    // Test sink format.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);
    // Test kafka producer.
    DynamicTableSink.SinkRuntimeProvider provider =
            actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(DataStreamSinkProvider.class));
    final DataStreamSinkProvider sinkProvider = (DataStreamSinkProvider) provider;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    sinkProvider.consumeDataStream(n -> Optional.empty(), env.fromElements(new BinaryRowData(1)));
    final StreamOperatorFactory<?> sinkOperatorFactory =
            env.getStreamGraph().getStreamNodes().stream()
                    .filter(n -> n.getOperatorName().contains("Writer"))
                    .findFirst()
                    .orElseThrow(() -> new RuntimeException(
                            "Expected operator with name Sink in stream graph."))
                    .getOperatorFactory();
    assertThat(sinkOperatorFactory, instanceOf(SinkWriterOperatorFactory.class));
    org.apache.flink.api.connector.sink2.Sink sink =
            ((SinkWriterOperatorFactory) sinkOperatorFactory).getSink();
    assertThat(sink, instanceOf(ReducingUpsertSink.class));
}
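The final assertion retrieves an org.apache.flink.api.connector.sink2.Sink from the writer operator factory. For reference, the sink2.Sink contract itself is small; the following is a minimal sketch against the Flink 1.15-era API. PrintingSink is illustrative and unrelated to the Kafka sink under test:

import java.io.IOException;

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.api.connector.sink2.SinkWriter;

// Minimal sketch of a sink2.Sink: the sink is a factory for SinkWriter instances
public class PrintingSink<T> implements Sink<T> {

    @Override
    public SinkWriter<T> createWriter(InitContext context) throws IOException {
        return new SinkWriter<T>() {
            @Override
            public void write(T element, Context context) {
                // Called once per record
                System.out.println(element);
            }

            @Override
            public void flush(boolean endOfInput) {
                // Nothing is buffered, so there is nothing to flush
            }

            @Override
            public void close() {
                // No resources to release
            }
        };
    }
}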
Use of org.apache.flink.api.connector.sink2.Sink in project bboxdb by jnidzwetzki.
The class OSMDataConverter, method start.
/**
 * Start the converter
 */
public void start() {
    try {
        // Open file handles
        for (final OSMType osmType : filter.keySet()) {
            final BufferedWriter bw = new BufferedWriter(
                    new FileWriter(new File(output + File.separator + osmType.toString())));
            writerMap.put(osmType, bw);
        }
        System.out.format("Importing %s%n", filename);
        final OsmosisReader reader = new OsmosisReader(new FileInputStream(filename));
        reader.setSink(new Sink() {
            @Override
            public void close() {
            }

            @Override
            public void complete() {
            }

            @Override
            public void initialize(final Map<String, Object> metaData) {
            }

            @Override
            public void process(final EntityContainer entityContainer) {
                try {
                    if (entityContainer.getEntity() instanceof Node) {
                        // Nodes are cheap to handle, dispatching to another thread
                        // is more expensive
                        final Node node = (Node) entityContainer.getEntity();
                        handleNode(node);
                        statistics.incProcessedNodes();
                    } else if (entityContainer.getEntity() instanceof Way) {
                        // Ways are expensive to handle
                        final Way way = (Way) entityContainer.getEntity();
                        queue.put(way);
                        statistics.incProcessedWays();
                    }
                } catch (InterruptedException e) {
                    return;
                }
            }
        });
        // The way consumer
        for (int i = 0; i < CONSUMER_THREADS; i++) {
            threadPool.submit(new Consumer());
        }
        reader.run();
    } catch (IOException e) {
        logger.error("Got an exception during import", e);
    } finally {
        shutdown();
    }
}
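The Consumer class submitted to the thread pool is not shown. Below is a minimal sketch of such a worker, assuming the shared queue is a BlockingQueue<Way> and the expensive per-way work lives in a handleWay method; both are assumptions, and the real bboxdb implementation may differ:

import java.util.concurrent.BlockingQueue;

import org.openstreetmap.osmosis.core.domain.v0_6.Way;

// Hypothetical sketch of the way consumer: drain the shared queue until interrupted
class Consumer implements Runnable {

    private final BlockingQueue<Way> queue;

    Consumer(final BlockingQueue<Way> queue) {
        this.queue = queue;
    }

    @Override
    public void run() {
        try {
            while (!Thread.currentThread().isInterrupted()) {
                // Block until the reader thread enqueues the next way
                final Way way = queue.take();
                handleWay(way);
            }
        } catch (InterruptedException e) {
            // Shutdown requested: restore the interrupt flag and exit
            Thread.currentThread().interrupt();
        }
    }

    private void handleWay(final Way way) {
        // Placeholder for the expensive per-way work (e.g. writing to the per-type files)
    }
}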