Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class MirrorClient, method remoteConsumerOffsets.
/**
 * Translate a remote consumer group's offsets into corresponding local offsets. Topics are automatically
 * renamed according to the ReplicationPolicy.
 * @param consumerGroupId group ID of remote consumer group
 * @param remoteClusterAlias alias of remote cluster
 * @param timeout timeout
 */
public Map<TopicPartition, OffsetAndMetadata> remoteConsumerOffsets(String consumerGroupId, String remoteClusterAlias, Duration timeout) {
    long deadline = System.currentTimeMillis() + timeout.toMillis();
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        // checkpoint topics are not "remote topics", as they are not replicated. So we don't need
        // to use ReplicationPolicy to create the checkpoint topic here.
        String checkpointTopic = replicationPolicy.checkpointsTopic(remoteClusterAlias);
        List<TopicPartition> checkpointAssignment = Collections.singletonList(new TopicPartition(checkpointTopic, 0));
        consumer.assign(checkpointAssignment);
        consumer.seekToBeginning(checkpointAssignment);
        while (System.currentTimeMillis() < deadline && !endOfStream(consumer, checkpointAssignment)) {
            ConsumerRecords<byte[], byte[]> records = consumer.poll(timeout);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                try {
                    Checkpoint checkpoint = Checkpoint.deserializeRecord(record);
                    if (checkpoint.consumerGroupId().equals(consumerGroupId)) {
                        offsets.put(checkpoint.topicPartition(), checkpoint.offsetAndMetadata());
                    }
                } catch (SchemaException e) {
                    log.info("Could not deserialize record. Skipping.", e);
                }
            }
        }
        log.info("Consumed {} checkpoint records for {} from {}.", offsets.size(), consumerGroupId, checkpointTopic);
    }
    return offsets;
}
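
A natural follow-up is to apply the translated offsets to a consumer on the local (target) cluster. The sketch below is not from the Kafka code base; it assumes the connect-mirror-client artifact is on the classpath and that MirrorClient can be constructed from a plain property map, and the bootstrap address, group id, and remote cluster alias are placeholders.

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.connect.mirror.MirrorClient;

public class CheckpointSeekExample {
    public static void main(String[] args) {
        // Hypothetical client properties for the local (target) cluster.
        Map<String, Object> props = new HashMap<>();
        props.put("bootstrap.servers", "localhost:9092");
        MirrorClient mirrorClient = new MirrorClient(props);

        // Translate the remote group's offsets into local offsets via the checkpoint topic.
        Map<TopicPartition, OffsetAndMetadata> translated =
            mirrorClient.remoteConsumerOffsets("my-group", "primary", Duration.ofMinutes(1));

        // Position a local consumer at the translated offsets and persist them for the group.
        Map<String, Object> consumerProps = new HashMap<>(props);
        consumerProps.put("group.id", "my-group");
        consumerProps.put("enable.auto.commit", "false");
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(consumerProps, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.assign(translated.keySet());
            translated.forEach((tp, offset) -> consumer.seek(tp, offset.offset()));
            consumer.commitSync(translated); // commit the translated positions as the group's offsets
        }
    }
}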
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class RegexSourceIntegrationTest, method testRegexMatchesTopicsAWhenDeleted.
@Test
public void testRegexMatchesTopicsAWhenDeleted() throws Exception {
    final Serde<String> stringSerde = Serdes.String();
    final List<String> expectedFirstAssignment = Arrays.asList("TEST-TOPIC-A", "TEST-TOPIC-B");
    final List<String> expectedSecondAssignment = Collections.singletonList("TEST-TOPIC-B");
    final List<String> assignedTopics = new CopyOnWriteArrayList<>();
    try {
        CLUSTER.createTopics("TEST-TOPIC-A", "TEST-TOPIC-B");
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-[A-Z]"));
        pattern1Stream.to(outputTopic, Produced.with(stringSerde, stringSerde));
        streams = new KafkaStreams(builder.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {
            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {
                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(assignedTopics, listener));
                    }
                };
            }
        });
        streams.start();
        TestUtils.waitForCondition(() -> assignedTopics.equals(expectedFirstAssignment), STREAM_TASKS_NOT_UPDATED);
    } finally {
        CLUSTER.deleteTopic("TEST-TOPIC-A");
    }
    TestUtils.waitForCondition(() -> assignedTopics.equals(expectedSecondAssignment), STREAM_TASKS_NOT_UPDATED);
}
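
TheConsumerRebalanceListener referenced above is defined elsewhere in the test class. A delegating listener of that kind likely resembles the following sketch (an assumption, not the actual class): it records the currently assigned topic names for the test's assertions and forwards every callback to the listener that Kafka Streams installed, so stream-task management keeps working.

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

public class TheConsumerRebalanceListener implements ConsumerRebalanceListener {
    private final List<String> assignedTopics;
    private final ConsumerRebalanceListener delegate;

    public TheConsumerRebalanceListener(final List<String> assignedTopics, final ConsumerRebalanceListener delegate) {
        this.assignedTopics = assignedTopics;
        this.delegate = delegate;
    }

    @Override
    public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
        for (final TopicPartition partition : partitions) {
            assignedTopics.remove(partition.topic());
        }
        delegate.onPartitionsRevoked(partitions);
    }

    @Override
    public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
        for (final TopicPartition partition : partitions) {
            // one entry per topic, regardless of partition count
            if (!assignedTopics.contains(partition.topic())) {
                assignedTopics.add(partition.topic());
            }
        }
        // sort so the test's list comparison does not depend on assignment order
        Collections.sort(assignedTopics);
        delegate.onPartitionsAssigned(partitions);
    }
}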
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class RegexSourceIntegrationTest, method testRegexMatchesTopicsAWhenCreated.
@Test
public void testRegexMatchesTopicsAWhenCreated() throws Exception {
    try {
        final Serde<String> stringSerde = Serdes.String();
        final List<String> expectedFirstAssignment = Collections.singletonList("TEST-TOPIC-1");
        // We compare lists of subscribed topics and hence require the order as well; this is guaranteed
        // with KIP-429 since we would NOT revoke TEST-TOPIC-1 but only add TEST-TOPIC-2, so the list is always
        // in the order "TEST-TOPIC-1, TEST-TOPIC-2". Note that if the KIP-429 behavior ever changes, this may become a flaky test.
        final List<String> expectedSecondAssignment = Arrays.asList("TEST-TOPIC-1", "TEST-TOPIC-2");
        CLUSTER.createTopic("TEST-TOPIC-1");
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-\\d"));
        pattern1Stream.to(outputTopic, Produced.with(stringSerde, stringSerde));
        final List<String> assignedTopics = new CopyOnWriteArrayList<>();
        streams = new KafkaStreams(builder.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {
            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {
                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(assignedTopics, listener));
                    }
                };
            }
        });
        streams.start();
        TestUtils.waitForCondition(() -> assignedTopics.equals(expectedFirstAssignment), STREAM_TASKS_NOT_UPDATED);
        CLUSTER.createTopic("TEST-TOPIC-2");
        TestUtils.waitForCondition(() -> assignedTopics.equals(expectedSecondAssignment), STREAM_TASKS_NOT_UPDATED);
        streams.close();
    } finally {
        CLUSTER.deleteTopicsAndWait("TEST-TOPIC-1", "TEST-TOPIC-2");
    }
}
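
The same pattern-based subscription works on a plain consumer, outside Kafka Streams: topics created later that match the regex are picked up on a metadata refresh and trigger a rebalance. The sketch below is not part of the test; the bootstrap address and group id are placeholders, and the rebalance listener simply prints the assignment.

import java.time.Duration;
import java.util.Collection;
import java.util.Properties;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PatternSubscriptionExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("group.id", "pattern-subscription-example"); // placeholder group
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            // Subscribe by pattern: any topic matching TEST-TOPIC-\d is included,
            // now and after future metadata refreshes.
            consumer.subscribe(Pattern.compile("TEST-TOPIC-\\d"), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
                    System.out.println("Revoked: " + partitions);
                }

                @Override
                public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
                    System.out.println("Assigned: " + partitions);
                }
            });
            consumer.poll(Duration.ofSeconds(5)); // the rebalance happens inside poll()
        }
    }
}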
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class StreamsResetter, method maybeResetInputAndSeekToEndIntermediateTopicOffsets.
private int maybeResetInputAndSeekToEndIntermediateTopicOffsets(final Map<Object, Object> consumerConfig, final boolean dryRun) throws IOException, ParseException {
    final List<String> inputTopics = options.valuesOf(inputTopicsOption);
    final List<String> intermediateTopics = options.valuesOf(intermediateTopicsOption);
    int topicNotFound = EXIT_CODE_SUCCESS;
    final List<String> notFoundInputTopics = new ArrayList<>();
    final List<String> notFoundIntermediateTopics = new ArrayList<>();
    final String groupId = options.valueOf(applicationIdOption);
    if (inputTopics.size() == 0 && intermediateTopics.size() == 0) {
        System.out.println("No input or intermediate topics specified. Skipping seek.");
        return EXIT_CODE_SUCCESS;
    }
    if (inputTopics.size() != 0) {
        System.out.println("Reset-offsets for input topics " + inputTopics);
    }
    if (intermediateTopics.size() != 0) {
        System.out.println("Seek-to-end for intermediate topics " + intermediateTopics);
    }
    final Set<String> topicsToSubscribe = new HashSet<>(inputTopics.size() + intermediateTopics.size());
    for (final String topic : inputTopics) {
        if (!allTopics.contains(topic)) {
            notFoundInputTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    for (final String topic : intermediateTopics) {
        if (!allTopics.contains(topic)) {
            notFoundIntermediateTopics.add(topic);
        } else {
            topicsToSubscribe.add(topic);
        }
    }
    if (!notFoundInputTopics.isEmpty()) {
        System.out.println("Following input topics are not found, skipping them");
        for (final String topic : notFoundInputTopics) {
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }
    if (!notFoundIntermediateTopics.isEmpty()) {
        System.out.println("Following intermediate topics are not found, skipping them");
        for (final String topic : notFoundIntermediateTopics) {
            System.out.println("Topic: " + topic);
        }
        topicNotFound = EXIT_CODE_ERROR;
    }
    // Return early if there is nothing to reset (the consumer would fail if we
    // try to poll with an empty subscription).
    if (topicsToSubscribe.isEmpty()) {
        return topicNotFound;
    }
    final Properties config = new Properties();
    config.putAll(consumerConfig);
    config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    try (final KafkaConsumer<byte[], byte[]> client = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
        final Collection<TopicPartition> partitions = topicsToSubscribe.stream()
            .map(client::partitionsFor)
            .flatMap(Collection::stream)
            .map(info -> new TopicPartition(info.topic(), info.partition()))
            .collect(Collectors.toList());
        client.assign(partitions);
        final Set<TopicPartition> inputTopicPartitions = new HashSet<>();
        final Set<TopicPartition> intermediateTopicPartitions = new HashSet<>();
        for (final TopicPartition p : partitions) {
            final String topic = p.topic();
            if (isInputTopic(topic)) {
                inputTopicPartitions.add(p);
            } else if (isIntermediateTopic(topic)) {
                intermediateTopicPartitions.add(p);
            } else {
                System.err.println("Skipping invalid partition: " + p);
            }
        }
        maybeReset(groupId, client, inputTopicPartitions);
        maybeSeekToEnd(groupId, client, intermediateTopicPartitions);
        if (!dryRun) {
            for (final TopicPartition p : partitions) {
                client.position(p);
            }
            client.commitSync();
        }
    } catch (final IOException | ParseException e) {
        System.err.println("ERROR: Resetting offsets failed.");
        throw e;
    }
    System.out.println("Done.");
    return topicNotFound;
}
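
Stripped of the option parsing and topic classification, the reset path boils down to assign / seek / position / commitSync on a consumer with auto-commit disabled. The standalone sketch below is not from StreamsResetter itself; the bootstrap address, group id, and topic name are placeholders, and it only shows the reset-to-beginning case.

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ResetToBeginningExample {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-streams-app"); // placeholder application id
        config.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        try (KafkaConsumer<byte[], byte[]> client =
                 new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            // Expand the topic into its partitions, as the resetter does with partitionsFor().
            List<TopicPartition> partitions = client.partitionsFor("my-input-topic").stream()
                .map(info -> new TopicPartition(info.topic(), info.partition()))
                .collect(Collectors.toList());
            client.assign(partitions);
            // Move the position of every partition to its earliest offset ...
            client.seekToBeginning(partitions);
            // ... force the lazy seek to resolve ...
            for (TopicPartition p : partitions) {
                client.position(p);
            }
            // ... and commit so the new positions become the group's committed offsets.
            client.commitSync();
        }
    }
}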
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class TopologyTestDriverTest, method shouldPassRecordHeadersIntoSerializersAndDeserializers.
@Test
public void shouldPassRecordHeadersIntoSerializersAndDeserializers() {
    testDriver = new TopologyTestDriver(setupSourceSinkTopology(), config);
    final AtomicBoolean passedHeadersToKeySerializer = new AtomicBoolean(false);
    final AtomicBoolean passedHeadersToValueSerializer = new AtomicBoolean(false);
    final AtomicBoolean passedHeadersToKeyDeserializer = new AtomicBoolean(false);
    final AtomicBoolean passedHeadersToValueDeserializer = new AtomicBoolean(false);
    final Serializer<byte[]> keySerializer = new ByteArraySerializer() {
        @Override
        public byte[] serialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToKeySerializer.set(true);
            return serialize(topic, data);
        }
    };
    final Serializer<byte[]> valueSerializer = new ByteArraySerializer() {
        @Override
        public byte[] serialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToValueSerializer.set(true);
            return serialize(topic, data);
        }
    };
    final Deserializer<byte[]> keyDeserializer = new ByteArrayDeserializer() {
        @Override
        public byte[] deserialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToKeyDeserializer.set(true);
            return deserialize(topic, data);
        }
    };
    final Deserializer<byte[]> valueDeserializer = new ByteArrayDeserializer() {
        @Override
        public byte[] deserialize(final String topic, final Headers headers, final byte[] data) {
            passedHeadersToValueDeserializer.set(true);
            return deserialize(topic, data);
        }
    };
    final TestInputTopic<byte[], byte[]> inputTopic = testDriver.createInputTopic(SOURCE_TOPIC_1, keySerializer, valueSerializer);
    final TestOutputTopic<byte[], byte[]> outputTopic = testDriver.createOutputTopic(SINK_TOPIC_1, keyDeserializer, valueDeserializer);
    inputTopic.pipeInput(testRecord1);
    outputTopic.readRecord();
    assertThat(passedHeadersToKeySerializer.get(), equalTo(true));
    assertThat(passedHeadersToValueSerializer.get(), equalTo(true));
    assertThat(passedHeadersToKeyDeserializer.get(), equalTo(true));
    assertThat(passedHeadersToValueDeserializer.get(), equalTo(true));
}
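
The pattern used above, overriding the three-argument deserialize overload, also works outside tests. Below is a minimal sketch (not from the Kafka code base) of a deserializer that inspects a hypothetical "schema-version" header before handing the bytes through unchanged; it could be passed to a KafkaConsumer or a TestOutputTopic just like the anonymous classes above.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// A header-aware deserializer: the three-argument deserialize() overload receives the
// record headers, so a deserializer can branch on them (e.g. a version or schema tag).
public class VersionTaggedDeserializer extends ByteArrayDeserializer {
    @Override
    public byte[] deserialize(final String topic, final Headers headers, final byte[] data) {
        final Header version = headers.lastHeader("schema-version"); // hypothetical header key
        if (version != null) {
            System.out.println("Record schema version: " + new String(version.value(), StandardCharsets.UTF_8));
        }
        return deserialize(topic, data); // fall back to the plain byte[] pass-through
    }
}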