Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.
The class NamedTopologyIntegrationTest, method shouldAddNamedTopologyToRunningApplicationWithMultipleNodes.
@Test
public void shouldAddNamedTopologyToRunningApplicationWithMultipleNodes() throws Exception {
    setupSecondKafkaStreams();
    // both instances get the same two named topologies: a simple count per key
    topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    topology2Builder.stream(INPUT_STREAM_2).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    topology2Builder2.stream(INPUT_STREAM_2).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);

    // start both nodes with only the first topology, then add the second one live
    streams.start(topology1Builder.build());
    streams2.start(topology1Builder2.build());
    waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));

    final AddNamedTopologyResult result = streams.addNamedTopology(topology2Builder.build());
    final AddNamedTopologyResult result2 = streams2.addNamedTopology(topology2Builder2.build());
    result.all().get();
    result2.all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_2, 3), equalTo(COUNT_OUTPUT_DATA));
}
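The `consumerConfig` handed to waitUntilMinKeyValueRecordsReceived is built elsewhere in the test class. Below is a minimal sketch of how such a verification-consumer config might look, using standard ConsumerConfig keys; the group id, deserializer choices, and the CLUSTER.bootstrapServers() helper are assumptions for illustration, not the test's actual fixture.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

final Properties consumerConfig = new Properties();
// point the verification consumer at the embedded cluster (assumed helper)
consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "named-topology-verify");
// read the output topics from the beginning so every produced record is seen
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);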
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.
The class NamedTopologyIntegrationTest, method shouldRemoveNamedTopologyToRunningApplicationWithMultipleNodesAndResetsOffsets.
@Test
public void shouldRemoveNamedTopologyToRunningApplicationWithMultipleNodesAndResetsOffsets() throws Exception {
    setupSecondKafkaStreams();
    topology1Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);
    topology1Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_1);

    streams.start(topology1Builder.build());
    streams2.start(topology1Builder2.build());
    waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));

    // remove the topology from both nodes, resetting its committed offsets
    final RemoveNamedTopologyResult result = streams.removeNamedTopology(TOPOLOGY_1, true);
    streams2.removeNamedTopology(TOPOLOGY_1, true).all().get();
    result.all().get();
    assertThat(streams.getTopologyByName(TOPOLOGY_1), equalTo(Optional.empty()));
    assertThat(streams2.getTopologyByName(TOPOLOGY_1), equalTo(Optional.empty()));
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    streams2.cleanUpNamedTopology(TOPOLOGY_1);

    // wipe the old changelog topics so the re-added topology starts from scratch
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("-changelog")).forEach(t -> {
        try {
            CLUSTER.deleteTopicAndWait(t);
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
    });

    // re-add a topology reading the same input but writing to a new output topic
    topology2Builder.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    topology2Builder2.stream(INPUT_STREAM_1).groupBy((k, v) -> k).count(IN_MEMORY_STORE).toStream().to(OUTPUT_STREAM_2);
    final AddNamedTopologyResult result1 = streams.addNamedTopology(topology2Builder.build());
    streams2.addNamedTopology(topology2Builder2.build()).all().get();
    result1.all().get();

    // the full counts appear again only because the offsets were reset above
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_2, 3), equalTo(COUNT_OUTPUT_DATA));
}
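The test relies on the embedded cluster's deleteTopicAndWait helper. Outside the test harness, the same changelog cleanup could be done with the Admin client; a hedged sketch, where the bootstrap address and the "-changelog" name filter are assumptions mirroring the test:

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

try (final Admin admin = Admin.create(Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
    // list every topic in the cluster and keep only the Streams changelog topics
    final Set<String> changelogs = admin.listTopics().names().get().stream()
        .filter(t -> t.contains("-changelog"))
        .collect(Collectors.toSet());
    // deletion is asynchronous; all().get() blocks until the brokers confirm it
    admin.deleteTopics(changelogs).all().get();
}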
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.
The class NamedTopologyIntegrationTest, method shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning.
@Test
public void shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTopologyWithRepartitioning() throws Exception {
    CLUSTER.createTopics(SUM_OUTPUT, COUNT_OUTPUT);
    // build up a named topology with two stateful subtopologies (the map forces a repartition)
    final KStream<String, Long> inputStream1 = topology1Builder.stream(INPUT_STREAM_1);
    inputStream1.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream1.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);

    // start with no topologies at all, then add the named topology to the running app
    streams.start();
    final NamedTopology namedTopology = topology1Builder.build();
    streams.addNamedTopology(namedTopology).all().get();
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));

    // remove it with offset reset, clean up local state, and delete its internal topics
    streams.removeNamedTopology(TOPOLOGY_1, true).all().get();
    streams.cleanUpNamedTopology(TOPOLOGY_1);
    CLUSTER.getAllTopicsInCluster().stream().filter(t -> t.contains("-changelog") || t.contains("-repartition")).forEach(t -> {
        try {
            CLUSTER.deleteTopicsAndWait(t);
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
    });

    // rebuild the identical topology from a duplicate builder and add it back
    final KStream<String, Long> inputStream = topology1BuilderDup.stream(INPUT_STREAM_1);
    inputStream.map(KeyValue::new).groupByKey().count().toStream().to(COUNT_OUTPUT);
    inputStream.map(KeyValue::new).groupByKey().reduce(Long::sum).toStream().to(SUM_OUTPUT);
    final NamedTopology namedTopologyDup = topology1BuilderDup.build();
    streams.addNamedTopology(namedTopologyDup).all().get();

    // since offsets were reset, the re-added topology reprocesses the full input
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, COUNT_OUTPUT, 3), equalTo(COUNT_OUTPUT_DATA));
    assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SUM_OUTPUT, 3), equalTo(SUM_OUTPUT_DATA));
    CLUSTER.deleteTopicsAndWait(SUM_OUTPUT, COUNT_OUTPUT);
}
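For context, the assertions above wait for at least three key-value records per output topic. A hedged sketch of how such input might be seeded into INPUT_STREAM_1 with a plain producer; the keys and values here are illustrative, not the test's actual fixture data:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;

final Properties producerProps = new Properties();
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
try (final KafkaProducer<String, Long> producer = new KafkaProducer<>(producerProps)) {
    // three records per key, so both count() and reduce(Long::sum) produce data
    for (final String key : asList("A", "B", "C")) {
        for (long i = 1; i <= 3; i++) {
            producer.send(new ProducerRecord<>(INPUT_STREAM_1, key, i));
        }
    }
}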
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project kafka by apache.
The class StreamsResetter, method maybeResetInputAndSeekToEndIntermediateTopicOffsets.
private int maybeResetInputAndSeekToEndIntermediateTopicOffsets(final Map<Object, Object> consumerConfig,
                                                                final boolean dryRun) throws IOException, ParseException {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    int topicNotFound = EXIT_CODE_SUCCESS;
    final List<String> notFoundInputTopics = new ArrayList<>();
    final List<String> notFoundIntermediateTopics = new ArrayList<>();
    final String groupId = options.valueOf(applicationIdOption);

    if (inputTopics.size() == 0 && intermediateTopics.size() == 0) {
        System.out.println("No input or intermediate topics specified. Skipping seek.");
        return EXIT_CODE_SUCCESS;
    }
    if (inputTopics.size() != 0) {
        System.out.println("Reset-offsets for input topics " + inputTopics);
    }
    if (intermediateTopics.size() != 0) {
        System.out.println("Seek-to-end for intermediate topics " + intermediateTopics);
    }

    // partition the requested topics into found and not-found sets
    final Set<String> topicsToSubscribe = new HashSet<>(inputTopics.size() + intermediateTopics.size());
    for (final String topic : inputTopics) {
        if (!allTopics.contains(topic)) {
            notFoundInputTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : intermediateTopics) {
        if (!allTopics.contains(topic)) {
            notFoundIntermediateTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    if (!notFoundInputTopics.isEmpty()) {
        System.out.println("Following input topics are not found, skipping them");
        for (final String topic : notFoundInputTopics) {
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }
    if (!notFoundIntermediateTopics.isEmpty()) {
        System.out.println("Following intermediate topics are not found, skipping them");
        for (final String topic : notFoundIntermediateTopics) {
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }

    // nothing left to reset: return early rather than try to poll with an empty subscription
    if (topicsToSubscribe.isEmpty()) {
        return topicNotFound;
    }

    final Properties config = new Properties();
    config.putAll(consumerConfig);
    config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    try (final KafkaConsumer<byte[], byte[]> client = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        final Collection<TopicPartition> partitions = topicsToSubscribe.stream()
            .map(client::partitionsFor)
            .flatMap(Collection::stream)
            .map(info -> new TopicPartition(info.topic(), info.partition()))
            .collect(Collectors.toList());
        client.assign(partitions);

        final Set<TopicPartition> inputTopicPartitions = new HashSet<>();
        final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
        for (final TopicPartition p : partitions) {
            final String topic = p.topic();
            if (isInputTopic(topic)) {
                inputTopicPartitions.add(p);
            } else if (isIntermediateTopic(topic)) {
                intermediateTopicPartitions.add(p);
            } else {
                System.err.println("Skipping invalid partition: " + p);
            }
        }

        maybeReset(groupId, client, inputTopicPartitions);
        maybeSeekToEnd(groupId, client, intermediateTopicPartitions);
        if (!dryRun) {
            for (final TopicPartition p : partitions) {
                // position() forces the lazily-evaluated seeks to resolve to concrete offsets
                client.position(p);
            }
            // commit the sought offsets under the application's group id
            client.commitSync();
        }
    } catch (final IOException | ParseException e) {
        System.err.println("ERROR: Resetting offsets failed.");
        throw e;
    }
    System.out.println("Done.");
    return topicNotFound;
}
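maybeReset and maybeSeekToEnd are defined elsewhere in StreamsResetter and handle several reset scenarios (to-earliest, to-datetime, shift-by, and so on). Under the default reset-to-beginning behavior they reduce to the consumer's seek API; the following is a simplified sketch of that one case, not the tool's full implementation:

// simplified sketch: the default reset path only, assuming the signatures used above
private void maybeReset(final String groupId,
                        final Consumer<byte[], byte[]> client,
                        final Set<TopicPartition> inputTopicPartitions) {
    if (!inputTopicPartitions.isEmpty()) {
        System.out.println("Reset offsets to beginning for input topic partitions " + inputTopicPartitions);
        // the seek is lazy: it takes effect on the next position()/poll() call
        client.seekToBeginning(inputTopicPartitions);
    }
}

private void maybeSeekToEnd(final String groupId,
                            final Consumer<byte[], byte[]> client,
                            final Set<TopicPartition> intermediateTopicPartitions) {
    if (!intermediateTopicPartitions.isEmpty()) {
        System.out.println("Seek to end for intermediate topic partitions " + intermediateTopicPartitions);
        client.seekToEnd(intermediateTopicPartitions);
    }
}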
Use of org.apache.kafka.clients.consumer.ConsumerConfig in project pinpoint by naver.
The class ConsumerConstructorInterceptor, method after.
@Override
public void after(Object target, Object[] args, Object result, Throwable throwable) {
    if (isDebug) {
        logger.afterInterceptor(target, args, result, throwable);
    }
    // don't record anything if the consumer constructor itself failed
    if (throwable != null) {
        return;
    }
    // the accessor interface is woven into the consumer class by the agent; bail out otherwise
    if (!(target instanceof RemoteAddressFieldAccessor)) {
        return;
    }
    ConsumerConfig consumerConfig = getConsumerConfig(args);
    if (consumerConfig == null) {
        return;
    }
    // stash the broker address on the consumer instance for later trace reporting
    String remoteAddress = getRemoteAddress(consumerConfig);
    ((RemoteAddressFieldAccessor) target)._$PINPOINT$_setRemoteAddress(remoteAddress);
}
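getConsumerConfig and getRemoteAddress are helpers defined elsewhere in the interceptor. A hedged sketch of how they plausibly work (Pinpoint's actual implementation may differ); note that ConsumerConfig inherits getList from Kafka's AbstractConfig, so bootstrap.servers can be read directly:

import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerConfig;

// scan the intercepted constructor arguments for the ConsumerConfig instance
private ConsumerConfig getConsumerConfig(final Object[] args) {
    if (args != null) {
        for (final Object arg : args) {
            if (arg instanceof ConsumerConfig) {
                return (ConsumerConfig) arg;
            }
        }
    }
    return null;
}

// render bootstrap.servers as one endpoint string, e.g. "broker1:9092,broker2:9092"
private String getRemoteAddress(final ConsumerConfig consumerConfig) {
    final List<String> servers = consumerConfig.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
    if (servers == null || servers.isEmpty()) {
        return "Unknown";
    }
    return String.join(",", servers);
}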