Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the project kafka by apache.
From the class StreamsResetter, method resetInputAndInternalAndSeekToEndIntermediateTopicOffsets:
/**
 * Resets consumer-group offsets for the Streams application:
 * input and internal topics are seeked to the beginning (offset zero),
 * while intermediate topics are seeked to the end. The resulting positions
 * are committed synchronously so the reset is persisted for the group.
 *
 * Prints a message and returns immediately when neither input nor
 * intermediate topics were specified on the command line.
 */
private void resetInputAndInternalAndSeekToEndIntermediateTopicOffsets() {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    if (inputTopics.isEmpty() && intermediateTopics.isEmpty()) {
        System.out.println("No input or intermediate topics specified. Skipping seek.");
        return;
    }
    if (!inputTopics.isEmpty()) {
        System.out.println("Resetting offsets to zero for input topics " + inputTopics + " and all internal topics.");
    }
    if (!intermediateTopics.isEmpty()) {
        System.out.println("Seek-to-end for intermediate topics " + intermediateTopics);
    }
    final Properties config = new Properties();
    config.putAll(consumerConfig);
    config.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, options.valueOf(bootstrapServerOption));
    // The application id doubles as the consumer group id whose offsets are reset.
    config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, options.valueOf(applicationIdOption));
    // Auto-commit is disabled so only the explicit commitSync() below writes offsets.
    config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    // Subscribe only to topics that actually exist; warn about missing ones.
    final Set<String> topicsToSubscribe = new HashSet<>(inputTopics.size() + intermediateTopics.size());
    for (final String topic : inputTopics) {
        if (!allTopics.contains(topic)) {
            System.err.println("Input topic " + topic + " not found. Skipping.");
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : intermediateTopics) {
        if (!allTopics.contains(topic)) {
            System.err.println("Intermediate topic " + topic + " not found. Skipping.");
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    // Internal topics are always included, regardless of command-line options.
    for (final String topic : allTopics) {
        if (isInternalTopic(topic)) {
            topicsToSubscribe.add(topic);
        }
    }
    try (final KafkaConsumer<byte[], byte[]> client = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        client.subscribe(topicsToSubscribe);
        // A poll is needed to join the group and receive a partition assignment.
        // NOTE(review): a 1 ms timeout may be too short for a rebalance to complete — confirm.
        client.poll(1);
        final Set<TopicPartition> partitions = client.assignment();
        final Set<TopicPartition> inputAndInternalTopicPartitions = new HashSet<>();
        final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
        for (final TopicPartition p : partitions) {
            final String topic = p.topic();
            if (isInputTopic(topic) || isInternalTopic(topic)) {
                inputAndInternalTopicPartitions.add(p);
            } else if (isIntermediateTopic(topic)) {
                intermediateTopicPartitions.add(p);
            } else {
                System.err.println("Skipping invalid partition: " + p);
            }
        }
        if (!inputAndInternalTopicPartitions.isEmpty()) {
            client.seekToBeginning(inputAndInternalTopicPartitions);
        }
        if (!intermediateTopicPartitions.isEmpty()) {
            client.seekToEnd(intermediateTopicPartitions);
        }
        // seekToBeginning()/seekToEnd() evaluate lazily; position() forces the
        // offset lookup so that commitSync() commits the reset positions.
        for (final TopicPartition p : partitions) {
            client.position(p);
        }
        client.commitSync();
    } catch (final RuntimeException e) {
        System.err.println("ERROR: Resetting offsets failed.");
        throw e;
    }
    System.out.println("Done.");
}
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the project kafka by apache.
From the class KafkaConsumerTest, method testInvalidSocketReceiveBufferSize:
/**
 * A negative receive buffer size (here -2) must be rejected at construction
 * time: the KafkaConsumer constructor is expected to throw a KafkaException.
 */
@Test(expected = KafkaException.class)
public void testInvalidSocketReceiveBufferSize() throws Exception {
    final Map<String, Object> invalidConfig = new HashMap<>();
    invalidConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    invalidConfig.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, -2);
    new KafkaConsumer<>(invalidConfig, new ByteArrayDeserializer(), new ByteArrayDeserializer());
}
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the project kafka by apache.
From the class KafkaConsumerTest, method newConsumer:
/**
 * Creates a byte-array consumer pointed at a non-existent broker, with the
 * MockMetricsReporter configured as the metrics reporter class.
 */
private KafkaConsumer<byte[], byte[]> newConsumer() {
    final Properties consumerProps = new Properties();
    consumerProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "my.consumer");
    consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    consumerProps.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    return new KafkaConsumer<>(consumerProps, new ByteArrayDeserializer(), new ByteArrayDeserializer());
}
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method testMetricConfigRecordingLevel:
/**
 * Verifies that the consumer's metrics recording level defaults to INFO and
 * can be switched to DEBUG via ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG.
 */
@Test
public void testMetricConfigRecordingLevel() {
    final Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    // Default: recording level is INFO when the config is not set.
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        assertEquals(Sensor.RecordingLevel.INFO, consumer.metrics.config().recordLevel());
    }
    props.put(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG");
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        assertEquals(Sensor.RecordingLevel.DEBUG, consumer.metrics.config().recordLevel());
    }
}
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method testInvalidSocketSendBufferSize:
/**
 * A negative send buffer size (here -2) must be rejected at construction
 * time: the KafkaConsumer constructor is expected to throw a KafkaException.
 */
@Test(expected = KafkaException.class)
public void testInvalidSocketSendBufferSize() throws Exception {
    final Map<String, Object> invalidConfig = new HashMap<>();
    invalidConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    invalidConfig.put(ConsumerConfig.SEND_BUFFER_CONFIG, -2);
    new KafkaConsumer<>(invalidConfig, new ByteArrayDeserializer(), new ByteArrayDeserializer());
}
Aggregations