
Example 91 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

The class FetcherTest, method testFetchPositionAfterException.

@Test
public void testFetchPositionAfterException() {
    // Verify that the next fetch offset advances by exactly the number of fetched records when
    // some fetched partitions cause an exception. This ensures the consumer won't lose records on exception.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> allFetchedRecords = new ArrayList<>();
    fetchRecordsInto(allFetchedRecords);
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());
    // tp0's OFFSET_OUT_OF_RANGE error surfaces on the next fetch attempt, after tp1's records were returned
    OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () -> fetchRecordsInto(allFetchedRecords));
    assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet());
    assertEquals(1L, e.offsetOutOfRangePartitions().get(tp0).longValue());
    // The exception leaves positions and previously fetched records untouched
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());
}
Also used: PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) LinkedHashMap(java.util.LinkedHashMap) Test(org.junit.jupiter.api.Test)
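
A consumer application sees the same guarantee: records from healthy partitions are delivered before the failing partition's error surfaces, and no positions are lost. A minimal recovery sketch, assuming a consumer configured with auto.offset.reset=none and the usual imports (java.time.Duration, org.apache.kafka.clients.consumer.*); the consumer variable and the process() callback are illustrative, not part of the test above:

try {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<byte[], byte[]> record : records) {
        // Hypothetical application callback
        process(record);
    }
} catch (OffsetOutOfRangeException e) {
    // Reseek only the out-of-range partitions; healthy partitions keep their advanced positions
    consumer.seekToBeginning(e.offsetOutOfRangePartitions().keySet());
}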

Example 92 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

The class FetcherTest, method testUpdatePositionWithLastRecordMissingFromBatch.

@Test
public void testUpdatePositionWithLastRecordMissingFromBatch() {
    buildFetcher();
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), new SimpleRecord("1".getBytes(), "v".getBytes()), new SimpleRecord("2".getBytes(), "v".getBytes()), new SimpleRecord(null, "value".getBytes()));
    // Remove the last record to simulate compaction
    MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter(0, 0) {

        @Override
        protected BatchRetentionResult checkBatchRetention(RecordBatch batch) {
            return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false);
        }

        @Override
        protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
            return record.key() != null;
        }
    }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
    result.outputBuffer().flip();
    MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer());
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, compactedRecords, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> allFetchedRecords = fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(3, fetchedRecords.size());
    for (int i = 0; i < 3; i++) {
        assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key()));
    }
    // The next offset should point to the next batch
    assertEquals(4L, subscriptions.position(tp0).offset);
}
Also used: DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) RecordBatch(org.apache.kafka.common.record.RecordBatch) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)
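
From the application side, the practical consequence is that offsets in a compacted topic are not contiguous, so progress must be tracked from record.offset() or consumer.position(), never by counting records. A minimal sketch, assuming the usual java.util and consumer imports; process() and the offsets map are illustrative names:

Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
for (ConsumerRecord<byte[], byte[]> record : consumer.poll(Duration.ofMillis(100))) {
    // Hypothetical application callback
    process(record);
    // Commit offset + 1; intermediate offsets may have been compacted away
    offsets.put(new TopicPartition(record.topic(), record.partition()),
            new OffsetAndMetadata(record.offset() + 1));
}
consumer.commitSync(offsets);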

Example 93 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

The class FetcherTest, method testFetchWithNoTopicId.

@Test
public void testFetchWithNoTopicId() {
    // Should work and default to using the old request type.
    buildFetcher();
    TopicIdPartition noId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("noId", 0));
    assignFromUserNoId(singleton(noId.topicPartition()));
    subscriptions.seek(noId.topicPartition(), 0);
    // Fetch should use request version 12
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fetchRequestMatcher((short) 12, noId, 0, Optional.of(validLeaderEpoch)), fullFetchResponse(noId, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(noId.topicPartition()));
    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(noId.topicPartition());
    assertEquals(3, records.size());
    // This is the next fetch position
    assertEquals(4L, subscriptions.position(noId.topicPartition()).offset);
    long offset = 1;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        assertEquals(offset, record.offset());
        offset += 1;
    }
}
Also used: TopicPartition(org.apache.kafka.common.TopicPartition) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.jupiter.api.Test)
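
The version pin reflects KIP-516: topic IDs joined the Fetch protocol in version 13, so a partition carrying Uuid.ZERO_UUID (no known ID) forces the older name-based request format, capped at version 12. A minimal sketch of the distinction (the topic name is illustrative):

// A real ID permits the ID-based Fetch format (v13+)...
TopicIdPartition withId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic", 0));
// ...while ZERO_UUID means "no ID assigned", falling back to name-based fetches (<= v12)
TopicIdPartition withoutId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("topic", 0));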

Example 94 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

The class FetcherTest, method testReadCommittedWithCommittedAndAbortedTransactions.

@Test
public void testReadCommittedWithCommittedAndAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    List<FetchResponseData.AbortedTransaction> abortedTransactions = new ArrayList<>();
    long pid1 = 1L;
    long pid2 = 2L;
    // Appends for producer 1 (eventually committed)
    appendTransactionalRecords(buffer, pid1, 0L, new SimpleRecord("commit1-1".getBytes(), "value".getBytes()), new SimpleRecord("commit1-2".getBytes(), "value".getBytes()));
    // Appends for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 2L, new SimpleRecord("abort2-1".getBytes(), "value".getBytes()));
    // commit producer 1
    commitTransaction(buffer, pid1, 3L);
    // append more for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 4L, new SimpleRecord("abort2-2".getBytes(), "value".getBytes()));
    // abort producer 2
    abortTransaction(buffer, pid2, 5L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(2L));
    // New transaction for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 6L, new SimpleRecord("abort1-1".getBytes(), "value".getBytes()));
    // New transaction for producer 2 (eventually committed)
    appendTransactionalRecords(buffer, pid2, 7L, new SimpleRecord("commit2-1".getBytes(), "value".getBytes()));
    // Add messages for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 8L, new SimpleRecord("abort1-2".getBytes(), "value".getBytes()));
    // abort producer 1
    abortTransaction(buffer, pid1, 9L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid1).setFirstOffset(6L));
    // commit producer 2
    commitTransaction(buffer, pid2, 10L);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    // There are only 3 committed records
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> fetchedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        fetchedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(mkSet("commit1-1", "commit1-2", "commit2-1"), fetchedKeys);
}
Also used: ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)
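
Outside of tests, this filtering is enabled purely through configuration: a consumer in read_committed mode returns only records from committed transactions, dropping aborted batches client-side. A minimal configuration sketch (the bootstrap address and group id are illustrative):

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "txn-reader");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
// Only records from committed transactions reach the application
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);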

Example 95 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

The class InMemoryWindowStore, method init.

@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
    this.context = context;
    final StreamsMetricsImpl metrics = ProcessorContextUtils.getMetricsImpl(context);
    final String threadId = Thread.currentThread().getName();
    final String taskName = context.taskId().toString();
    expiredRecordSensor = TaskMetrics.droppedRecordsSensor(threadId, taskName, metrics);
    if (root != null) {
        final boolean consistencyEnabled = StreamsConfig.InternalConfig.getBoolean(context.appConfigs(), IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, false);
        context.register(root, (RecordBatchingStateRestoreCallback) records -> {
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                put(Bytes.wrap(extractStoreKeyBytes(record.key())), record.value(), extractStoreTimestamp(record.key()));
                ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            }
        });
    }
    open = true;
}
Also used: StreamsConfig(org.apache.kafka.streams.StreamsConfig) TaskMetrics(org.apache.kafka.streams.processor.internals.metrics.TaskMetrics) LoggerFactory(org.slf4j.LoggerFactory) PositionBound(org.apache.kafka.streams.query.PositionBound) RecordBatchingStateRestoreCallback(org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback) WindowStore(org.apache.kafka.streams.state.WindowStore) ByteBuffer(java.nio.ByteBuffer) StoreToProcessorContextAdapter(org.apache.kafka.streams.processor.internals.StoreToProcessorContextAdapter) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) Windowed(org.apache.kafka.streams.kstream.Windowed) Map(java.util.Map) ProcessorContextUtils(org.apache.kafka.streams.processor.internals.ProcessorContextUtils) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) NoSuchElementException(java.util.NoSuchElementException) QueryResult(org.apache.kafka.streams.query.QueryResult) Sensor(org.apache.kafka.common.metrics.Sensor) QueryConfig(org.apache.kafka.streams.query.QueryConfig) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) Position(org.apache.kafka.streams.query.Position) Query(org.apache.kafka.streams.query.Query) WindowKeySchema.extractStoreTimestamp(org.apache.kafka.streams.state.internals.WindowKeySchema.extractStoreTimestamp) ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) KeyValue(org.apache.kafka.streams.KeyValue) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ChangelogRecordDeserializationHelper(org.apache.kafka.streams.processor.internals.ChangelogRecordDeserializationHelper) Bytes(org.apache.kafka.common.utils.Bytes) Objects(java.util.Objects) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) WindowKeySchema.extractStoreKeyBytes(org.apache.kafka.streams.state.internals.WindowKeySchema.extractStoreKeyBytes) StateStore(org.apache.kafka.streams.processor.StateStore) WindowStoreIterator(org.apache.kafka.streams.state.WindowStoreIterator) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TimeWindow(org.apache.kafka.streams.kstream.internals.TimeWindow) IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED(org.apache.kafka.streams.StreamsConfig.InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED)
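
The @Deprecated marker reflects the migration from ProcessorContext to StateStoreContext for store initialization. Judging from the StoreToProcessorContextAdapter import above, the non-deprecated overload can simply adapt the new context type onto this legacy path; a sketch of that delegation, offered as an assumption about the surrounding class rather than verified source:

@Override
public void init(final StateStoreContext context, final StateStore root) {
    // Route the new context type onto the deprecated code path above
    init(StoreToProcessorContextAdapter.adapt(context), root);
}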

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 314
TopicPartition (org.apache.kafka.common.TopicPartition): 160
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 123
List (java.util.List): 100
HashMap (java.util.HashMap): 98
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 31
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22