Use of org.apache.kafka.streams.kstream.KStreamBuilder in project kafka by apache.
The class StreamThreadTest, method shouldNotViolateAtLeastOnceWhenExceptionOccursDuringFlushStateWhileSuspendingState:
@Test
public void shouldNotViolateAtLeastOnceWhenExceptionOccursDuringFlushStateWhileSuspendingState() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey();
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    // A task whose state flush always fails, simulating an error while the task is being suspended.
    final TestStreamTask testStreamTask = new TestStreamTask(
            new TaskId(0, 0),
            applicationId,
            Utils.mkSet(new TopicPartition("t1", 0)),
            builder.build(0),
            clientSupplier.consumer,
            clientSupplier.producer,
            clientSupplier.restoreConsumer,
            config,
            new MockStreamsMetrics(new Metrics()),
            new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time)) {

        @Override
        public void flushState() {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StreamsConfig config1 = new StreamsConfig(configProps());
    // A thread that always hands out the failing task.
    final StreamThread thread = new StreamThread(builder, config1, clientSupplier, applicationId, clientId,
            processId, new Metrics(), new MockTime(),
            new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return testStreamTask;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(testStreamTask.id, testStreamTask.partitions);
    thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions);
    try {
        // Revoking the partitions suspends the task, which flushes state and throws.
        thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
        fail("should have thrown exception");
    } catch (Exception e) {
        // expected
    }
    // The failed task must not have committed, which preserves at-least-once semantics.
    assertFalse(testStreamTask.committed);
}
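The try/fail/catch idiom above predates JUnit's built-in exception assertions; on JUnit 4.13 or later the same check can be written with assertThrows. A minimal sketch under that assumption, reusing the thread and testStreamTask fixtures from the test:

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThrows;

// Revoking the partitions suspends the task and the overridden flushState() throws.
// StreamsException extends RuntimeException, so this also matches a wrapped failure.
assertThrows(RuntimeException.class,
        () -> thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()));
// The failed task must not have committed.
assertFalse(testStreamTask.committed);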
Use of org.apache.kafka.streams.kstream.KStreamBuilder in project microservices by pwillhan.
The class GeoLocationStreams, method init:
@PostConstruct
public void init() {
    Map<String, Object> props = new HashMap<>();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "geolocation-application");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.99.100:9092");
    props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, GeoLocationSerdes.class.getName());
    StreamsConfig config = new StreamsConfig(props);

    KStreamBuilder builder = new KStreamBuilder();
    builder.stream("geolocationStreams").filter(new Predicate<Object, Object>() {

        @Override
        public boolean test(Object key, Object value) {
            GeoLocation geolocation = (GeoLocation) value;
            System.out.println("Stream received => " + value);
            // Forward only records whose coordinates fall within valid latitude/longitude bounds.
            return geolocation.getLatitude() >= -90 && geolocation.getLatitude() < 90
                    && geolocation.getLongitude() >= -180 && geolocation.getLongitude() < 180;
        }
    }).to("geolocations");

    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.start();
}
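KStreamBuilder and the global KEY_SERDE_CLASS_CONFIG/VALUE_SERDE_CLASS_CONFIG settings were deprecated in Kafka 1.0 and later removed. A sketch of the same topology against the current StreamsBuilder API; geoLocationSerde is a hypothetical Serde<GeoLocation> standing in for GeoLocationSerdes:

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "geolocation-application");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.99.100:9092");

StreamsBuilder builder = new StreamsBuilder();
// geoLocationSerde is assumed here; the stream is typed as KStream<String, GeoLocation>, so no cast is needed.
builder.stream("geolocationStreams", Consumed.with(Serdes.String(), geoLocationSerde))
        .filter((key, geolocation) -> geolocation.getLatitude() >= -90 && geolocation.getLatitude() < 90
                && geolocation.getLongitude() >= -180 && geolocation.getLongitude() < 180)
        .to("geolocations", Produced.with(Serdes.String(), geoLocationSerde));

KafkaStreams streams = new KafkaStreams(builder.build(), props);
streams.start();

Passing serdes per operation through Consumed and Produced replaces the removed global serde settings.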
Use of org.apache.kafka.streams.kstream.KStreamBuilder in project kafka-streams-examples by confluentinc.
The class EmailService, method processStreams:
private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) {
    KStreamBuilder builder = new KStreamBuilder();

    // Create the streams/tables for the join
    KStream<String, Order> orders = builder.stream(ORDERS.keySerde(), ORDERS.valueSerde(), ORDERS.name());
    KStream<String, Payment> payments = builder.stream(PAYMENTS.keySerde(), PAYMENTS.valueSerde(), PAYMENTS.name());
    GlobalKTable<Long, Customer> customers = builder.globalTable(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde(), CUSTOMERS.name());

    // Rekey payments by OrderId for the windowed join
    payments = payments.selectKey((s, payment) -> payment.getOrderId());

    // Join the two streams and the table, then send an email for each result
    orders.join(payments, EmailTuple::new, // join the Orders and Payments streams within the window
            JoinWindows.of(1 * MIN), serdes)
            // because customers is a GlobalKTable, we can join on any attribute of the Customer
            .join(customers, (key1, tuple) -> tuple.order.getCustomerId(),
                    (tuple, customer) -> tuple.setCustomer(customer))
            .peek((key, emailTuple) -> emailer.sendEmail(emailTuple));

    return new KafkaStreams(builder, baseStreamsConfig(bootstrapServers, stateDir, APP_ID));
}
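On current clients the same pipeline is written against StreamsBuilder, with Duration-based join windows and the join serdes supplied inline via StreamJoined (Kafka 2.4+). A sketch under those assumptions, keeping the original topic metadata objects and EmailTuple:

StreamsBuilder builder = new StreamsBuilder();
KStream<String, Order> orders = builder.stream(ORDERS.name(), Consumed.with(ORDERS.keySerde(), ORDERS.valueSerde()));
KStream<String, Payment> payments = builder
        .stream(PAYMENTS.name(), Consumed.with(PAYMENTS.keySerde(), PAYMENTS.valueSerde()))
        // Rekey payments by order id so the windowed join can match them to orders.
        .selectKey((paymentId, payment) -> payment.getOrderId());
GlobalKTable<Long, Customer> customers = builder.globalTable(CUSTOMERS.name(),
        Consumed.with(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde()));

orders.join(payments, EmailTuple::new,
        JoinWindows.of(Duration.ofMinutes(1)),
        StreamJoined.with(ORDERS.keySerde(), ORDERS.valueSerde(), PAYMENTS.valueSerde()))
        // Join on the customer id carried inside the tuple; a GlobalKTable allows joining on any attribute.
        .join(customers, (orderId, tuple) -> tuple.order.getCustomerId(),
                (tuple, customer) -> tuple.setCustomer(customer))
        .peek((orderId, tuple) -> emailer.sendEmail(tuple));

return new KafkaStreams(builder.build(), baseStreamsConfig(bootstrapServers, stateDir, APP_ID));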
Use of org.apache.kafka.streams.kstream.KStreamBuilder in project tutorials by eugenp.
The class KafkaStreamsLiveTest, method shouldTestKafkaStreams:
@Test
@Ignore("requires a Kafka broker running locally")
public void shouldTestKafkaStreams() throws InterruptedException {
    // given
    String inputTopic = "inputTopic";
    Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-live-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());

    // when
    KStreamBuilder builder = new KStreamBuilder();
    KStream<String, String> textLines = builder.stream(inputTopic);
    Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);
    // Split each line into lowercase words, group by word, and count occurrences.
    KTable<String, Long> wordCounts = textLines
            .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
            .groupBy((key, word) -> word)
            .count();
    wordCounts.foreach((word, count) -> System.out.println("word: " + word + " -> " + count));

    String outputTopic = "outputTopic";
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();
    wordCounts.to(stringSerde, longSerde, outputTopic);

    KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    streams.start();

    // then
    Thread.sleep(30000);
    streams.close();
}
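With StreamsBuilder the same word count differs mainly at the sink: KTable#to(Serde, Serde, String) was removed, so the counts are converted back to a stream before being written. A sketch assuming the same streamsConfiguration as above:

StreamsBuilder builder = new StreamsBuilder();
Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);
// Key/value types come from the default serdes configured in streamsConfiguration.
builder.<String, String>stream("inputTopic")
        .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
        .groupBy((key, word) -> word)
        .count()
        // KTable#to was removed; emit the changelog as a KStream instead.
        .toStream()
        .to("outputTopic", Produced.with(Serdes.String(), Serdes.Long()));

KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();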