Use of org.apache.kafka.streams.KafkaStreams in project kafka by apache.
The class SimpleBenchmark, method kTableKTableJoin.
/**
 * Measure the performance of a KTable-KTable left join. The setup is such that each
 * KTable record joins to exactly one element in the other KTable.
 */
public void kTableKTableJoin(String kTableTopic1, String kTableTopic2) throws Exception {
    if (maybeSetupPhase(kTableTopic1, "simple-benchmark-produce-ktable-topic1", false)) {
        maybeSetupPhase(kTableTopic2, "simple-benchmark-produce-ktable-topic2", false);
        return;
    }
    CountDownLatch latch = new CountDownLatch(1);
    // setup join
    Properties props = setStreamProperties("simple-benchmark-ktable-ktable-join");
    final KafkaStreams streams = createKafkaStreamsKTableKTableJoin(props, kTableTopic1, kTableTopic2, latch);
    // run benchmark
    runGenericBenchmark(streams, "Streams KTableKTable LeftJoin Performance [records/latency/rec-sec/MB-sec joined]: ", latch);
}
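The helper createKafkaStreamsKTableKTableJoin is not shown on this page. A minimal sketch of what it plausibly builds, using the same pre-1.0 KStreamBuilder API as the other snippets here; the Long-key/byte[]-value serdes, the store names, and the no-op joiner are illustrative assumptions:

private KafkaStreams createKafkaStreamsKTableKTableJoin(final Properties props, final String kTableTopic1,
                                                        final String kTableTopic2, final CountDownLatch latch) {
    final KStreamBuilder builder = new KStreamBuilder();
    // materialize both input topics as tables (store names assumed)
    final KTable<Long, byte[]> input1 = builder.table(Serdes.Long(), Serdes.ByteArray(), kTableTopic1, "ktable-store-1");
    final KTable<Long, byte[]> input2 = builder.table(Serdes.Long(), Serdes.ByteArray(), kTableTopic2, "ktable-store-2");
    input1.leftJoin(input2, new ValueJoiner<byte[], byte[], byte[]>() {
        @Override
        public byte[] apply(final byte[] value1, final byte[] value2) {
            return value1; // the join result itself does not matter for the benchmark
        }
    }).toStream().foreach(new ForeachAction<Long, byte[]>() {
        @Override
        public void apply(final Long key, final byte[] value) {
            latch.countDown(); // signal the driver once joined output is flowing
        }
    });
    return new KafkaStreams(builder, props);
}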
Use of org.apache.kafka.streams.KafkaStreams in project kafka by apache.
The class SimpleBenchmark, method processStreamWithSink.
/**
 * Measure the performance of consuming records from a source topic and
 * forwarding them to a sink topic.
 */
public void processStreamWithSink(String topic) throws Exception {
    if (maybeSetupPhase(topic, "simple-benchmark-process-stream-with-sink-load", true)) {
        return;
    }
    CountDownLatch latch = new CountDownLatch(1);
    final KafkaStreams streams = createKafkaStreamsWithSink(topic, latch);
    long latency = startStreamsThread(streams, latch);
    printResults("Streams Performance [records/latency/rec-sec/MB-sec source+sink]: ", latency);
}
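createKafkaStreamsWithSink is likewise not shown. It plausibly pipes the source topic to a sink topic and reads the sink back, releasing the latch once the records have made the round trip. SINK_TOPIC, the Integer/byte[] serdes, and the numRecords field are assumptions for illustration:

private KafkaStreams createKafkaStreamsWithSink(final String topic, final CountDownLatch latch) {
    final Properties props = setStreamProperties("simple-benchmark-streams-with-sink");
    final KStreamBuilder builder = new KStreamBuilder();
    // forward the source topic to the sink topic unchanged
    final KStream<Integer, byte[]> source = builder.stream(Serdes.Integer(), Serdes.ByteArray(), topic);
    source.to(Serdes.Integer(), Serdes.ByteArray(), SINK_TOPIC);
    // read the sink back and release the latch after the last record arrives
    builder.stream(Serdes.Integer(), Serdes.ByteArray(), SINK_TOPIC).foreach(new ForeachAction<Integer, byte[]>() {
        int processed = 0;
        @Override
        public void apply(final Integer key, final byte[] value) {
            if (++processed == numRecords) {
                latch.countDown(); // every record completed the source -> sink round trip
            }
        }
    });
    return new KafkaStreams(builder, props);
}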
Use of org.apache.kafka.streams.KafkaStreams in project kafka by apache.
The class SimpleBenchmark, method kStreamKTableJoin.
/**
 * Measure the performance of a KStream-KTable left join. The setup is such that each
 * KStream record joins to exactly one element in the KTable.
 */
public void kStreamKTableJoin(String kStreamTopic, String kTableTopic) throws Exception {
    if (maybeSetupPhase(kStreamTopic, "simple-benchmark-produce-kstream", false)) {
        maybeSetupPhase(kTableTopic, "simple-benchmark-produce-ktable", false);
        return;
    }
    CountDownLatch latch = new CountDownLatch(1);
    // setup join
    Properties props = setStreamProperties("simple-benchmark-kstream-ktable-join");
    final KafkaStreams streams = createKafkaStreamsKStreamKTableJoin(props, kStreamTopic, kTableTopic, latch);
    // run benchmark
    runGenericBenchmark(streams, "Streams KStreamKTable LeftJoin Performance [records/latency/rec-sec/MB-sec joined]: ", latch);
}
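As above, a sketch of what createKafkaStreamsKStreamKTableJoin plausibly wires up: a stream-side leftJoin against a materialized table, with the latch released once joined records appear. Serdes, store name, and joiner are illustrative:

private KafkaStreams createKafkaStreamsKStreamKTableJoin(final Properties props, final String kStreamTopic,
                                                         final String kTableTopic, final CountDownLatch latch) {
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<Long, byte[]> stream = builder.stream(Serdes.Long(), Serdes.ByteArray(), kStreamTopic);
    final KTable<Long, byte[]> table = builder.table(Serdes.Long(), Serdes.ByteArray(), kTableTopic, "ktable-store");
    stream.leftJoin(table, new ValueJoiner<byte[], byte[], byte[]>() {
        @Override
        public byte[] apply(final byte[] value1, final byte[] value2) {
            return value1; // join result is irrelevant to the measurement
        }
    }).foreach(new ForeachAction<Long, byte[]>() {
        @Override
        public void apply(final Long key, final byte[] value) {
            latch.countDown(); // joined records are flowing
        }
    });
    return new KafkaStreams(builder, props);
}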
Use of org.apache.kafka.streams.KafkaStreams in project kafka by apache.
The class ShutdownDeadlockTest, method start.
public void start() {
    final String topic = "source";
    final Properties props = new Properties();
    props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "shouldNotDeadlock");
    props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<String, String> source = builder.stream(Serdes.String(), Serdes.String(), topic);
    // every record throws, so the stream thread is guaranteed to die
    source.foreach(new ForeachAction<String, String>() {
        @Override
        public void apply(final String key, final String value) {
            throw new RuntimeException("KABOOM!");
        }
    });
    final KafkaStreams streams = new KafkaStreams(builder, props);
    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            Exit.exit(1);
        }
    });
    // closing from a shutdown hook must complete within the timeout
    // rather than deadlock against the dying stream thread
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            streams.close(5, TimeUnit.SECONDS);
        }
    }));
    final Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.CLIENT_ID_CONFIG, "SmokeTest");
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    // send a single record to trigger the exception
    final KafkaProducer<String, String> producer = new KafkaProducer<>(producerProps);
    producer.send(new ProducerRecord<>(topic, "a", "a"));
    producer.flush();
    streams.start();
    // block until the uncaught-exception handler terminates the JVM
    synchronized (this) {
        try {
            wait();
        } catch (InterruptedException e) {
            // ignored
        }
    }
}
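start() never returns on its own: the foreach throws on the first record, the uncaught-exception handler terminates the JVM via Exit.exit(1), and the shutdown hook then checks that streams.close(5, TimeUnit.SECONDS) completes instead of deadlocking. A hypothetical driver (the constructor signature is an assumption; the real harness lives in Kafka's system tests):

public static void main(final String[] args) {
    // "localhost:9092" stands in for the bootstrap servers the harness supplies
    new ShutdownDeadlockTest("localhost:9092").start();
}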
Use of org.apache.kafka.streams.KafkaStreams in project kafka by apache.
The class SmokeTestClient, method createKafkaStreams.
private static KafkaStreams createKafkaStreams(File stateDir, String kafka) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "SmokeTest");
    props.put(StreamsConfig.STATE_DIR_CONFIG, stateDir.toString());
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 3);
    props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 2);
    props.put(StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG, 100);
    props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 2);
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KStreamBuilder builder = new KStreamBuilder();
    // echo every input record, and drop the END marker from the data stream
    KStream<String, Integer> source = builder.stream(stringSerde, intSerde, "data");
    source.to(stringSerde, intSerde, "echo");
    KStream<String, Integer> data = source.filter(new Predicate<String, Integer>() {
        @Override
        public boolean test(String key, Integer value) {
            return value == null || value != END;
        }
    });
    data.process(SmokeTestUtil.printProcessorSupplier("data"));
    KGroupedStream<String, Integer> groupedData = data.groupByKey(stringSerde, intSerde);
    // min: windowed minimum per key
    groupedData.aggregate(new Initializer<Integer>() {
        @Override
        public Integer apply() {
            return Integer.MAX_VALUE;
        }
    }, new Aggregator<String, Integer, Integer>() {
        @Override
        public Integer apply(String aggKey, Integer value, Integer aggregate) {
            return (value < aggregate) ? value : aggregate;
        }
    }, TimeWindows.of(TimeUnit.DAYS.toMillis(1)), intSerde, "uwin-min")
            .toStream().map(new Unwindow<String, Integer>())
            .to(stringSerde, intSerde, "min");
    KTable<String, Integer> minTable = builder.table(stringSerde, intSerde, "min", "minStoreName");
    minTable.toStream().process(SmokeTestUtil.printProcessorSupplier("min"));
    // max: windowed maximum per key
    groupedData.aggregate(new Initializer<Integer>() {
        @Override
        public Integer apply() {
            return Integer.MIN_VALUE;
        }
    }, new Aggregator<String, Integer, Integer>() {
        @Override
        public Integer apply(String aggKey, Integer value, Integer aggregate) {
            return (value > aggregate) ? value : aggregate;
        }
    }, TimeWindows.of(TimeUnit.DAYS.toMillis(2)), intSerde, "uwin-max")
            .toStream().map(new Unwindow<String, Integer>())
            .to(stringSerde, intSerde, "max");
    KTable<String, Integer> maxTable = builder.table(stringSerde, intSerde, "max", "maxStoreName");
    maxTable.toStream().process(SmokeTestUtil.printProcessorSupplier("max"));
    // sum: windowed sum per key
    groupedData.aggregate(new Initializer<Long>() {
        @Override
        public Long apply() {
            return 0L;
        }
    }, new Aggregator<String, Integer, Long>() {
        @Override
        public Long apply(String aggKey, Integer value, Long aggregate) {
            return (long) value + aggregate;
        }
    }, TimeWindows.of(TimeUnit.DAYS.toMillis(2)), longSerde, "win-sum")
            .toStream().map(new Unwindow<String, Long>())
            .to(stringSerde, longSerde, "sum");
    KTable<String, Long> sumTable = builder.table(stringSerde, longSerde, "sum", "sumStoreName");
    sumTable.toStream().process(SmokeTestUtil.printProcessorSupplier("sum"));
    // cnt: windowed count per key
    groupedData.count(TimeWindows.of(TimeUnit.DAYS.toMillis(2)), "uwin-cnt")
            .toStream().map(new Unwindow<String, Long>())
            .to(stringSerde, longSerde, "cnt");
    KTable<String, Long> cntTable = builder.table(stringSerde, longSerde, "cnt", "cntStoreName");
    cntTable.toStream().process(SmokeTestUtil.printProcessorSupplier("cnt"));
    // dif: max - min, a KTable-KTable join
    maxTable.join(minTable, new ValueJoiner<Integer, Integer, Integer>() {
        @Override
        public Integer apply(Integer value1, Integer value2) {
            return value1 - value2;
        }
    }).to(stringSerde, intSerde, "dif");
    // avg: sum / cnt, another KTable-KTable join
    sumTable.join(cntTable, new ValueJoiner<Long, Long, Double>() {
        @Override
        public Double apply(Long value1, Long value2) {
            return (double) value1 / (double) value2;
        }
    }).to(stringSerde, doubleSerde, "avg");
    // test repartition: regroup the count table by its value
    Agg agg = new Agg();
    cntTable.groupBy(agg.selector(), stringSerde, longSerde)
            .aggregate(agg.init(), agg.adder(), agg.remover(), longSerde, "cntByCnt")
            .to(stringSerde, longSerde, "tagg");
    final KafkaStreams streamsClient = new KafkaStreams(builder, props);
    streamsClient.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            System.out.println("FATAL: An unexpected exception is encountered on thread " + t + ": " + e);
            streamsClient.close(30, TimeUnit.SECONDS);
        }
    });
    return streamsClient;
}
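The topology above leans on two helpers from SmokeTestUtil that this page does not show. Plausible minimal versions, assuming Unwindow is a KeyValueMapper that strips the window from a windowed aggregate and Agg supplies the selector, initializer, adder, and remover for the repartitioning aggregate:

// Unwindow: drop the window and keep only the underlying record key, so a
// windowed aggregate can be written back to a plain-keyed topic.
public static class Unwindow<K, V> implements KeyValueMapper<Windowed<K>, V, KeyValue<K, V>> {
    @Override
    public KeyValue<K, V> apply(final Windowed<K> winKey, final V value) {
        return new KeyValue<>(winKey.key(), value);
    }
}

// Agg: regroup the count table by its value ("how many keys have count c"),
// which exercises the repartition path. The adder and remover keep the
// aggregate correct as upstream counts change.
public static class Agg {
    KeyValueMapper<String, Long, KeyValue<String, Long>> selector() {
        return new KeyValueMapper<String, Long, KeyValue<String, Long>>() {
            @Override
            public KeyValue<String, Long> apply(final String key, final Long value) {
                // use the count itself as the new grouping key, weight 1
                return new KeyValue<>(value == null ? null : Long.toString(value), 1L);
            }
        };
    }

    Initializer<Long> init() {
        return new Initializer<Long>() {
            @Override
            public Long apply() {
                return 0L;
            }
        };
    }

    Aggregator<String, Long, Long> adder() {
        return new Aggregator<String, Long, Long>() {
            @Override
            public Long apply(final String aggKey, final Long value, final Long aggregate) {
                return aggregate + value;
            }
        };
    }

    Aggregator<String, Long, Long> remover() {
        return new Aggregator<String, Long, Long>() {
            @Override
            public Long apply(final String aggKey, final Long value, final Long aggregate) {
                return aggregate - value;
            }
        };
    }
}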