Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
From the class RegexSourceIntegrationTest, method testMultipleConsumersCanReadFromPartitionedTopic.
@Test
public void testMultipleConsumersCanReadFromPartitionedTopic() throws Exception {
    KafkaStreams partitionedStreamsLeader = null;
    KafkaStreams partitionedStreamsFollower = null;
    try {
        final Serde<String> stringSerde = Serdes.String();
        final StreamsBuilder builderLeader = new StreamsBuilder();
        final StreamsBuilder builderFollower = new StreamsBuilder();
        final List<String> expectedAssignment = Arrays.asList(PARTITIONED_TOPIC_1, PARTITIONED_TOPIC_2);
        final KStream<String, String> partitionedStreamLeader = builderLeader.stream(Pattern.compile("partitioned-\\d"));
        final KStream<String, String> partitionedStreamFollower = builderFollower.stream(Pattern.compile("partitioned-\\d"));
        partitionedStreamLeader.to(outputTopic, Produced.with(stringSerde, stringSerde));
        partitionedStreamFollower.to(outputTopic, Produced.with(stringSerde, stringSerde));
        final List<String> leaderAssignment = new CopyOnWriteArrayList<>();
        final List<String> followerAssignment = new CopyOnWriteArrayList<>();
        partitionedStreamsLeader = new KafkaStreams(builderLeader.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {
            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {
                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(leaderAssignment, listener));
                    }
                };
            }
        });
        partitionedStreamsFollower = new KafkaStreams(builderFollower.build(), streamsConfiguration, new DefaultKafkaClientSupplier() {
            @Override
            public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
                return new KafkaConsumer<byte[], byte[]>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer()) {
                    @Override
                    public void subscribe(final Pattern topics, final ConsumerRebalanceListener listener) {
                        super.subscribe(topics, new TheConsumerRebalanceListener(followerAssignment, listener));
                    }
                };
            }
        });
        partitionedStreamsLeader.start();
        partitionedStreamsFollower.start();
        TestUtils.waitForCondition(() -> followerAssignment.equals(expectedAssignment) && leaderAssignment.equals(expectedAssignment), "topic assignment not completed");
    } finally {
        if (partitionedStreamsLeader != null) {
            partitionedStreamsLeader.close();
        }
        if (partitionedStreamsFollower != null) {
            partitionedStreamsFollower.close();
        }
    }
}
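The helper TheConsumerRebalanceListener referenced above is defined elsewhere in the test class; the snippet only shows where it is plugged in. A minimal sketch of what such a delegating listener could look like (the actual helper in Kafka's test may differ, for example in how it clears or orders the assignment list) is:

import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

// Sketch only: records which topics were assigned to the wrapped consumer,
// then delegates to the original listener so Kafka Streams keeps functioning.
class TheConsumerRebalanceListener implements ConsumerRebalanceListener {

    private final List<String> assignedTopics;
    private final ConsumerRebalanceListener delegate;

    TheConsumerRebalanceListener(final List<String> assignedTopics, final ConsumerRebalanceListener delegate) {
        this.assignedTopics = assignedTopics;
        this.delegate = delegate;
    }

    @Override
    public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
        assignedTopics.clear();
        delegate.onPartitionsRevoked(partitions);
    }

    @Override
    public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
        for (final TopicPartition partition : partitions) {
            if (!assignedTopics.contains(partition.topic())) {
                assignedTopics.add(partition.topic());
            }
        }
        // Sorting makes the recorded topics comparable against the expected
        // assignment regardless of the order in which partitions arrived.
        Collections.sort(assignedTopics);
        delegate.onPartitionsAssigned(partitions);
    }
}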
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
From the class FetcherTest, method testFetchedRecordsRaisesOnSerializationErrors.
@Test
public void testFetchedRecordsRaisesOnSerializationErrors() {
    // Raise an exception from somewhere in the middle of the fetch response
    // so that we can verify that our position does not advance after raising.
    ByteArrayDeserializer deserializer = new ByteArrayDeserializer() {
        int i = 0;

        @Override
        public byte[] deserialize(String topic, byte[] data) {
            if (i++ % 2 == 1) {
                // Should be blocked on the value deserialization of the first record.
                assertEquals("value-1", new String(data, StandardCharsets.UTF_8));
                throw new SerializationException();
            }
            return data;
        }
    };
    buildFetcher(deserializer, deserializer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    // The fetcher should block on the deserialization error.
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.collectFetch();
            fail("fetchedRecords should have raised");
        } catch (SerializationException e) {
            // The position should not advance since no data has been returned.
            assertEquals(1, subscriptions.position(tp0).offset);
        }
    }
}
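The anonymous subclass above injects failures on purpose; the stock ByteArrayDeserializer never throws, it simply hands back the raw record bytes. For contrast, a minimal self-contained sketch of wiring ByteArrayDeserializer into a plain consumer through the constructor, the same way the test does (bootstrap address, topic and group id below are placeholders), might look like:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Sketch only: the bootstrap address, topic and group id are placeholders.
public class RawBytesConsumerExample {

    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "raw-bytes-example");

        // Passing deserializer instances to the constructor (instead of setting the
        // key.deserializer / value.deserializer properties) mirrors the test above;
        // ByteArrayDeserializer just returns the record bytes unchanged.
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singletonList("example-topic"));
            final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                final int valueLength = record.value() == null ? 0 : record.value().length;
                System.out.printf("offset=%d value bytes=%d%n", record.offset(), valueLength);
            }
        }
    }
}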
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
From the class FetcherTest, method testCompletedFetchRemoval.
@Test
public void testCompletedFetchRemoval() {
    // Ensure the removal of completed fetches that cause an Exception if and only if they contain empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData().setPartitionIndex(tp2.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData().setPartitionIndex(tp3.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) {
        fetchedRecords.addAll(records);
    }
    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());
    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        recordsByPartition = fetchedRecords();
        for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) {
            fetchedRecords.addAll(records);
        }
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }
    // Should have received one OffsetOutOfRangeException for partition tp0.
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(1, oor.offsetOutOfRangePartitions().size());
    recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) {
        fetchedRecords.addAll(records);
    }
    // Should not have received an exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());
    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            recordsByPartition = fetchedRecords();
            for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) {
                fetchedRecords.addAll(records);
            }
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received as many as numExceptionsExpected KafkaExceptions for tp3.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
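Because the fetcher above is built with OffsetResetStrategy.NONE, an out-of-range position is not reset automatically but is surfaced to the caller. A minimal sketch of how application code might handle the corresponding OffsetOutOfRangeException from KafkaConsumer.poll follows; the reset-to-beginning policy is just one possible choice, and the class and method names here are illustrative:

import java.time.Duration;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;

// Sketch only: with auto.offset.reset=none there is no automatic reset, so the
// application decides where to resume after an out-of-range position.
public final class OffsetOutOfRangeHandling {

    public static ConsumerRecords<byte[], byte[]> pollResettingToEarliest(final KafkaConsumer<byte[], byte[]> consumer) {
        try {
            return consumer.poll(Duration.ofSeconds(1));
        } catch (final OffsetOutOfRangeException e) {
            // The exception reports exactly which partitions were out of range;
            // here we jump back to the earliest available offset and retry once.
            consumer.seekToBeginning(e.partitions());
            return consumer.poll(Duration.ofSeconds(1));
        }
    }
}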
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
From the class FetcherTest, method testMultipleAbortMarkers.
@Test
public void testMultipleAbortMarkers() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Duplicate abort -- should be ignored.
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Now commit a transaction.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // Normal fetch.
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(committedKeys, actuallyCommittedKeys);
}
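The test drives the fetcher directly with IsolationLevel.READ_COMMITTED; the consumer-side counterpart is the isolation.level configuration property. A minimal sketch of building such a consumer (the bootstrap address and group id are placeholders for illustration):

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

// Sketch only: isolation.level=read_committed hides records from aborted
// transactions as well as the transaction control markers themselves.
public final class ReadCommittedConsumerFactory {

    public static KafkaConsumer<byte[], byte[]> newReadCommittedConsumer(final String bootstrapServers) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // The group id is a placeholder for this sketch.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "read-committed-example");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        return new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    }
}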
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
From the class FetcherTest, method testFetcherConcurrency.
@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++) {
        topicPartitions.add(new TopicPartition(topicName, i));
    }
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, 2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions, metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null) {
                return null;
            } else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData().setPartitionIndex(tp.topicPartition().partition()).setHighWatermark(offset + 2).setLastStableOffset(offset + 2).setLogStartOffset(0).setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
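The verifySessionPartitions() hook above iterates the handler's internal map while fetch traffic runs on another thread, using Thread.yield to widen the race window. A standalone sketch (not Kafka code; all names here are illustrative) of the failure mode it is probing for, namely unsynchronized iteration of a LinkedHashMap under concurrent modification:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentModificationException;

// Sketch only: intentionally unsafe iteration. A LinkedHashMap iterated while
// another thread mutates it without synchronization typically fails fast with
// ConcurrentModificationException; Thread.yield() widens the race window,
// just as the test's verifySessionPartitions() does.
public final class UnsafeIterationDemo {

    public static void main(String[] args) throws InterruptedException {
        final Map<Integer, String> sessionPartitions = new LinkedHashMap<>();
        for (int i = 0; i < 1_000; i++) {
            sessionPartitions.put(i, "partition-" + i);
        }

        final Thread writer = new Thread(() -> {
            for (int i = 1_000; i < 2_000; i++) {
                sessionPartitions.put(i, "partition-" + i);
            }
        });
        writer.start();

        try {
            int visited = 0;
            for (Map.Entry<Integer, String> entry : sessionPartitions.entrySet()) {
                visited += entry.getValue().isEmpty() ? 0 : 1;
                Thread.yield();
            }
            System.out.println("iteration happened to complete; visited " + visited + " entries");
        } catch (ConcurrentModificationException e) {
            System.out.println("caught the expected ConcurrentModificationException");
        }
        writer.join();
    }
}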