Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka-streams-examples by confluentinc.
The class SumLambdaIntegrationTest, method shouldSumEvenNumbers.
@Test
public void shouldSumEvenNumbers() throws Exception {
List<Integer> inputValues = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
List<Integer> expectedValues = Collections.singletonList(30);
//
// Step 1: Configure and start the processor topology.
//
StreamsBuilder builder = new StreamsBuilder();
Properties streamsConfiguration = new Properties();
streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "sum-lambda-integration-test");
streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
// The commit interval for flushing records to state stores and downstream must be lower than
// this integration test's timeout (30 secs) to ensure we observe the expected processing results.
streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10 * 1000);
streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
// Use a temporary directory for storing state, which will be automatically removed after the test.
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
KStream<Integer, Integer> input = builder.stream(inputTopic);
// Keep only the even numbers, re-key them all to the same key, and sum the values.
KTable<Integer, Integer> sumOfEvenNumbers = input.filter((k, v) -> v % 2 == 0).selectKey((k, v) -> 1).groupByKey().reduce((v1, v2) -> v1 + v2);
sumOfEvenNumbers.toStream().to(outputTopic);
KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
streams.start();
//
// Step 2: Produce some input data to the input topic.
//
Properties producerConfig = new Properties();
producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
IntegrationTestUtils.produceValuesSynchronously(inputTopic, inputValues, producerConfig);
//
// Step 3: Verify the application's output data.
//
Properties consumerConfig = new Properties();
consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "sum-lambda-integration-test-standard-consumer");
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
List<Integer> actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size());
streams.close();
assertThat(actualValues).isEqualTo(expectedValues);
}
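The same ConsumerConfig keys used in the verification step above also configure a plain KafkaConsumer outside of IntegrationTestUtils. Below is a minimal, self-contained sketch; the broker address (localhost:9092) and output topic name (sum-output) are placeholders, not values from the test itself.
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
public class SumOutputReader {
    public static void main(String[] args) {
        Properties consumerConfig = new Properties();
        // Placeholder broker address and group id.
        consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "sum-output-reader");
        consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
        consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
        try (KafkaConsumer<Integer, Integer> consumer = new KafkaConsumer<>(consumerConfig)) {
            // "sum-output" is a hypothetical topic name standing in for the test's outputTopic.
            consumer.subscribe(Collections.singletonList("sum-output"));
            ConsumerRecords<Integer, Integer> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<Integer, Integer> record : records) {
                System.out.printf("key=%s value=%s%n", record.key(), record.value());
            }
        }
    }
}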
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project incubator-skywalking by apache.
The class ConsumerConstructorInterceptor, method onConstruct.
@Override
public void onConstruct(EnhancedInstance objInst, Object[] allArguments) {
ConsumerConfig config = (ConsumerConfig) allArguments[0];
// set the bootstrap server address
ConsumerEnhanceRequiredInfo requiredInfo = new ConsumerEnhanceRequiredInfo();
requiredInfo.setBrokerServers(config.getList("bootstrap.servers"));
objInst.setSkyWalkingDynamicField(requiredInfo);
}
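For context, ConsumerConfig extends AbstractConfig, so the getList("bootstrap.servers") call above parses the comma-separated broker list into a List<String>. The following is a minimal sketch of that behavior outside of SkyWalking's instrumentation; the broker addresses are placeholders, and the deserializer settings are included only because ConsumerConfig requires them to validate.
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
public class BrokerListExtraction {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder broker addresses; key/value deserializers are mandatory settings for ConsumerConfig.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker-1:9092,broker-2:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        ConsumerConfig config = new ConsumerConfig(props);
        // getList() splits the comma-separated "bootstrap.servers" value into individual addresses.
        List<String> brokers = config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
        System.out.println(String.join(";", brokers));
    }
}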
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafkastreams-cep by fhussonnois.
The class CEPStockKStreamsIntegrationTest, method test.
@Test
public void test() throws ExecutionException, InterruptedException {
final Collection<KeyValue<String, String>> batch1 = Arrays.asList(
    new KeyValue<>(null, "{\"name\":\"e1\",\"price\":100,\"volume\":1010}"),
    new KeyValue<>(null, "{\"name\":\"e2\",\"price\":120,\"volume\":990}"),
    new KeyValue<>(null, "{\"name\":\"e3\",\"price\":120,\"volume\":1005}"),
    new KeyValue<>(null, "{\"name\":\"e4\",\"price\":121,\"volume\":999}"),
    new KeyValue<>(null, "{\"name\":\"e5\",\"price\":120,\"volume\":999}"),
    new KeyValue<>(null, "{\"name\":\"e6\",\"price\":125,\"volume\":750}"),
    new KeyValue<>(null, "{\"name\":\"e7\",\"price\":120,\"volume\":950}"),
    new KeyValue<>(null, "{\"name\":\"e8\",\"price\":120,\"volume\":700}"));
IntegrationTestUtils.produceKeyValuesSynchronously(INPUT_STREAM, batch1, TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()), mockTime);
// build query
ComplexStreamsBuilder builder = new ComplexStreamsBuilder();
CEPStream<String, StockEvent> stream = builder.stream(INPUT_STREAM);
KStream<String, Sequence<String, StockEvent>> stocks = stream.query("Stocks", Patterns.STOCKS);
stocks.mapValues(seq -> {
JSONObject json = new JSONObject();
seq.asMap().forEach((k, v) -> {
JSONArray events = new JSONArray();
json.put(k, events);
List<String> collect = v.stream().map(e -> e.value.name).collect(Collectors.toList());
Collections.reverse(collect);
collect.forEach(events::add);
});
return json.toJSONString();
}).through(OUTPUT_STREAM, Produced.with(null, Serdes.String())).print(Printed.toSysOut());
Topology topology = builder.build();
kafkaStreams = new KafkaStreams(topology, streamsConfiguration);
kafkaStreams.start();
final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class);
List<KeyValue<String, String>> result = IntegrationTestUtils.readKeyValues(OUTPUT_STREAM, consumerConfig, TimeUnit.SECONDS.toMillis(10), 4);
Assert.assertEquals(4, result.size());
Assert.assertEquals("{\"0\":[\"e1\"],\"1\":[\"e2\",\"e3\",\"e4\",\"e5\"],\"2\":[\"e6\"]}", result.get(0).value);
Assert.assertEquals("{\"0\":[\"e3\"],\"1\":[\"e4\"],\"2\":[\"e6\"]}", result.get(1).value);
Assert.assertEquals("{\"0\":[\"e1\"],\"1\":[\"e2\",\"e3\",\"e4\",\"e5\",\"e6\",\"e7\"],\"2\":[\"e8\"]}", result.get(2).value);
Assert.assertEquals("{\"0\":[\"e3\"],\"1\":[\"e4\",\"e6\"],\"2\":[\"e8\"]}", result.get(3).value);
}
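The TestUtils.consumerConfig(...) helper above assembles the consumer properties passed to readKeyValues. A rough hand-rolled equivalent using ConsumerConfig constants is sketched below; the group id scheme is an assumption for illustration, not what TestUtils actually generates.
import java.util.Properties;
import java.util.UUID;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
public final class CepConsumerConfig {
    // Builds properties roughly equivalent to TestUtils.consumerConfig(bootstrapServers,
    // StringDeserializer.class, StringDeserializer.class) as used in the test above.
    public static Properties forBootstrapServers(String bootstrapServers) {
        Properties config = new Properties();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Hypothetical group id; any unique id works for a one-off verification consumer.
        config.put(ConsumerConfig.GROUP_ID_CONFIG, "cep-test-" + UUID.randomUUID());
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return config;
    }
}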
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project samza by apache.
The class TestKafkaSystemConsumer, method createConsumer.
private KafkaSystemConsumer createConsumer(String fetchMsg, String fetchBytes) {
final Map<String, String> map = new HashMap<>();
map.put(JobConfig.JOB_NAME, TEST_JOB);
map.put(String.format(KafkaConfig.CONSUMER_FETCH_THRESHOLD(), TEST_SYSTEM), fetchMsg);
map.put(String.format(KafkaConfig.CONSUMER_FETCH_THRESHOLD_BYTES(), TEST_SYSTEM), fetchBytes);
map.put(String.format("systems.%s.consumer.%s", TEST_SYSTEM, ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG), BOOTSTRAP_SERVERS);
map.put(JobConfig.JOB_NAME, "jobName");
Config config = new MapConfig(map);
String clientId = KafkaConsumerConfig.createClientId(TEST_CLIENT_ID, config);
KafkaConsumerConfig consumerConfig = KafkaConsumerConfig.getKafkaSystemConsumerConfig(config, TEST_SYSTEM, clientId);
final KafkaConsumer<byte[], byte[]> kafkaConsumer = new MockKafkaConsumer(consumerConfig);
MockKafkaSystemConsumer newKafkaSystemConsumer = new MockKafkaSystemConsumer(kafkaConsumer, TEST_SYSTEM, config, TEST_CLIENT_ID, new KafkaSystemConsumerMetrics(TEST_SYSTEM, new NoOpMetricsRegistry()), System::currentTimeMillis);
return newKafkaSystemConsumer;
}
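Samza scopes Kafka consumer settings under a systems.<system>.consumer. prefix, and KafkaConsumerConfig.getKafkaSystemConsumerConfig turns them back into the plain property names that ConsumerConfig understands. The following is a simplified illustration of that prefix stripping, not Samza's actual implementation.
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
public class SystemScopedConsumerProps {
    // Illustrative only: maps "systems.<system>.consumer.<kafka-property>" keys to the plain
    // Kafka consumer property names that ConsumerConfig expects.
    static Map<String, String> extractConsumerProps(Map<String, String> samzaConfig, String systemName) {
        String prefix = String.format("systems.%s.consumer.", systemName);
        Map<String, String> consumerProps = new HashMap<>();
        samzaConfig.forEach((key, value) -> {
            if (key.startsWith(prefix)) {
                consumerProps.put(key.substring(prefix.length()), value);
            }
        });
        return consumerProps;
    }
    public static void main(String[] args) {
        Map<String, String> samzaConfig = new HashMap<>();
        samzaConfig.put("systems.kafka.consumer." + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Prints {bootstrap.servers=localhost:9092}
        System.out.println(extractConsumerProps(samzaConfig, "kafka"));
    }
}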
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.
The class NamedTopologyIntegrationTest, method shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology.
@Test
public void shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology() throws Exception {
try {
// For this test, one of the topologies reads from an input topic with just one partition, so
// there is only one instance of that topology's store, and any method that looks up all hosts
// for a specific store and topology should always return exactly one StreamsMetadata.
CLUSTER.createTopic(SINGLE_PARTITION_INPUT_STREAM, 1, 1);
CLUSTER.createTopic(SINGLE_PARTITION_OUTPUT_STREAM, 1, 1);
produceToInputTopics(SINGLE_PARTITION_INPUT_STREAM, STANDARD_INPUT_DATA);
final String topology1Store = "store-" + TOPOLOGY_1;
final String topology2Store = "store-" + TOPOLOGY_2;
topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
topology2Builder.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
streams.addNamedTopology(topology1Builder.build());
streams.removeNamedTopology(TOPOLOGY_1);
assertThat(streams.getTopologyByName(TOPOLOGY_1), is(Optional.empty()));
streams.addNamedTopology(topology1Builder.build());
streams.addNamedTopology(topology2Builder.build());
IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(15));
assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SINGLE_PARTITION_OUTPUT_STREAM, 3), equalTo(COUNT_OUTPUT_DATA));
final ReadOnlyKeyValueStore<String, Long> store = streams.store(NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(TOPOLOGY_1, topology1Store, QueryableStoreTypes.keyValueStore()));
assertThat(store.get("A"), equalTo(2L));
final Collection<StreamsMetadata> streamsMetadata = streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1);
final Collection<StreamsMetadata> streamsMetadata2 = streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2);
assertThat(streamsMetadata.size(), equalTo(1));
assertThat(streamsMetadata2.size(), equalTo(1));
final KeyQueryMetadata keyMetadata = streams.queryMetadataForKey(topology1Store, "A", new StringSerializer(), TOPOLOGY_1);
final KeyQueryMetadata keyMetadata2 = streams.queryMetadataForKey(topology2Store, "A", new StringSerializer(), TOPOLOGY_2);
assertThat(keyMetadata, not(NOT_AVAILABLE));
assertThat(keyMetadata, equalTo(keyMetadata2));
final Map<String, Map<Integer, LagInfo>> partitionLags1 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_1);
final Map<String, Map<Integer, LagInfo>> partitionLags2 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_2);
assertThat(partitionLags1.keySet(), equalTo(singleton(topology1Store)));
assertThat(partitionLags1.get(topology1Store).keySet(), equalTo(mkSet(0, 1)));
assertThat(partitionLags2.keySet(), equalTo(singleton(topology2Store)));
// only one copy of the store in topology-2
assertThat(partitionLags2.get(topology2Store).keySet(), equalTo(singleton(0)));
// Start up a second node with both topologies
setupSecondKafkaStreams();
topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(Materialized.as(topology1Store)).toStream().to(OUTPUT_STREAM_1);
topology2Builder2.stream(SINGLE_PARTITION_INPUT_STREAM).groupByKey().count(Materialized.as(topology2Store)).toStream().to(SINGLE_PARTITION_OUTPUT_STREAM);
streams2.start(asList(topology1Builder2.build(), topology2Builder2.build()));
waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
verifyMetadataForTopology(TOPOLOGY_1, streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1), streams2.streamsMetadataForStore(topology1Store, TOPOLOGY_1));
verifyMetadataForTopology(TOPOLOGY_2, streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2), streams2.streamsMetadataForStore(topology2Store, TOPOLOGY_2));
verifyMetadataForTopology(TOPOLOGY_1, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_1), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_1));
verifyMetadataForTopology(TOPOLOGY_2, streams.allStreamsClientsMetadataForTopology(TOPOLOGY_2), streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_2));
} finally {
CLUSTER.deleteTopics(SINGLE_PARTITION_INPUT_STREAM, SINGLE_PARTITION_OUTPUT_STREAM);
}
}
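The consumerConfig passed to waitUntilMinKeyValueRecordsReceived above is a field defined elsewhere in NamedTopologyIntegrationTest and is not shown in this snippet. One plausible way to build an equivalent configuration for reading the String-keyed, Long-valued count records produced by the two count() topologies is sketched below; the group id is hypothetical.
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
public final class CountOutputConsumerConfig {
    // Consumer settings suitable for the String-keyed, Long-valued records written to
    // OUTPUT_STREAM_1 and SINGLE_PARTITION_OUTPUT_STREAM by the count() topologies above.
    public static Properties create(String bootstrapServers) {
        Properties config = new Properties();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Hypothetical group id for the verification consumer.
        config.put(ConsumerConfig.GROUP_ID_CONFIG, "named-topology-verification-consumer");
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        return config;
    }
}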