Example 11 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class ConsumerRecordTest, method testConstructorsWithChecksum.

@Test
@Deprecated
public void testConstructorsWithChecksum() {
    String topic = "topic";
    int partition = 0;
    long offset = 23;
    long timestamp = 23434217432432L;
    TimestampType timestampType = TimestampType.CREATE_TIME;
    String key = "key";
    String value = "value";
    long checksum = 50L;
    int serializedKeySize = 100;
    int serializedValueSize = 1142;
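    // Deprecated checksum constructor without explicit headers: headers default
    // to an empty RecordHeaders and leaderEpoch to Optional.empty()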
    ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(new RecordHeaders(), record.headers());
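    // Deprecated checksum constructor that also takes explicit headers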
    RecordHeaders headers = new RecordHeaders();
    headers.add(new RecordHeader("header key", "header value".getBytes(StandardCharsets.UTF_8)));
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value, headers);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(headers, record.headers());
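    // Deprecated checksum constructor that takes headers and a leader epoch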
    Optional<Integer> leaderEpoch = Optional.of(10);
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value, headers, leaderEpoch);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(leaderEpoch, record.leaderEpoch());
    assertEquals(headers, record.headers());
}
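
All three checksum constructors above are deprecated. For comparison, a minimal sketch of the same record built with the non-deprecated constructor (the one the later examples use), reusing the locals from the test above; the variable name `modern` is illustrative:

    // Current constructor: the checksum parameter is gone; headers and the
    // leader epoch are passed directly
    ConsumerRecord<String, String> modern = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, serializedKeySize, serializedValueSize, key, value, new RecordHeaders(), Optional.empty());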

Example 12 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class MockConsumerTest, method testSimpleMock.

@Test
public void testSimpleMock() {
    consumer.subscribe(Collections.singleton("test"));
    assertEquals(0, consumer.poll(Duration.ZERO).count());
    consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1)));
    // Mock consumers need to seek manually since they cannot automatically reset offsets
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test", 0), 0L);
    beginningOffsets.put(new TopicPartition("test", 1), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    consumer.seek(new TopicPartition("test", 0), 0);
    ConsumerRecord<String, String> rec1 = new ConsumerRecord<>("test", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, "key1", "value1", new RecordHeaders(), Optional.empty());
    ConsumerRecord<String, String> rec2 = new ConsumerRecord<>("test", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, "key2", "value2", new RecordHeaders(), Optional.empty());
    consumer.addRecord(rec1);
    consumer.addRecord(rec2);
    ConsumerRecords<String, String> recs = consumer.poll(Duration.ofMillis(1));
    Iterator<ConsumerRecord<String, String>> iter = recs.iterator();
    assertEquals(rec1, iter.next());
    assertEquals(rec2, iter.next());
    assertFalse(iter.hasNext());
    final TopicPartition tp = new TopicPartition("test", 0);
    assertEquals(2L, consumer.position(tp));
    consumer.commitSync();
    assertEquals(2L, consumer.committed(Collections.singleton(tp)).get(tp).offset());
}
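
The `consumer` field is created elsewhere in the test class. A minimal sketch of that fixture, assuming a MockConsumer constructed with OffsetResetStrategy.EARLIEST (the field's actual construction is not shown in the excerpt):

    // Assumed fixture: MockConsumer takes an OffsetResetStrategy up front;
    // EARLIEST is consistent with the manual seek to offset 0 in the test
    private final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);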

Example 13 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class MirrorSourceTaskTest, method testPoll.

@Test
public void testPoll() {
    // Create a consumer mock
    byte[] key1 = "abc".getBytes();
    byte[] value1 = "fgh".getBytes();
    byte[] key2 = "123".getBytes();
    byte[] value2 = "456".getBytes();
    List<ConsumerRecord<byte[], byte[]>> consumerRecordsList = new ArrayList<>();
    String topicName = "test";
    String headerKey = "key";
    RecordHeaders headers = new RecordHeaders(new Header[] { new RecordHeader(headerKey, "value".getBytes()) });
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 0, 0, System.currentTimeMillis(), TimestampType.CREATE_TIME, key1.length, value1.length, key1, value1, headers, Optional.empty()));
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 1, 1, System.currentTimeMillis(), TimestampType.CREATE_TIME, key2.length, value2.length, key2, value2, headers, Optional.empty()));
    ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(topicName, 0), consumerRecordsList));
    @SuppressWarnings("unchecked") KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class);
    when(consumer.poll(any())).thenReturn(consumerRecords);
    MirrorMetrics metrics = mock(MirrorMetrics.class);
    String sourceClusterName = "cluster1";
    ReplicationPolicy replicationPolicy = new DefaultReplicationPolicy();
    MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(consumer, metrics, sourceClusterName, replicationPolicy, 50);
    List<SourceRecord> sourceRecords = mirrorSourceTask.poll();
    assertEquals(2, sourceRecords.size());
    for (int i = 0; i < sourceRecords.size(); i++) {
        SourceRecord sourceRecord = sourceRecords.get(i);
        ConsumerRecord<byte[], byte[]> consumerRecord = consumerRecordsList.get(i);
        assertEquals(consumerRecord.key(), sourceRecord.key(), "consumerRecord key does not equal sourceRecord key");
        assertEquals(consumerRecord.value(), sourceRecord.value(), "consumerRecord value does not equal sourceRecord value");
        // We expect the remote topic name to be derived from the replication policy in use
        assertEquals(replicationPolicy.formatRemoteTopic(sourceClusterName, topicName), sourceRecord.topic(), "topic name does not match the replication policy's remote topic");
        // We expect MirrorMaker to preserve the source partition assignment
        assertEquals(consumerRecord.partition(), sourceRecord.kafkaPartition().intValue(), "partition assignment was not preserved");
        // Check header values
        List<Header> expectedHeaders = new ArrayList<>();
        consumerRecord.headers().forEach(expectedHeaders::add);
        List<org.apache.kafka.connect.header.Header> taskHeaders = new ArrayList<>();
        sourceRecord.headers().forEach(taskHeaders::add);
        compareHeaders(expectedHeaders, taskHeaders);
    }
}
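
The compareHeaders helper is not shown in the excerpt. One hypothetical shape for it, assuming header values survive mirroring as raw byte arrays (the name, parameters, and assertions are illustrative, not the real implementation):

    private void compareHeaders(List<Header> expected, List<org.apache.kafka.connect.header.Header> actual) {
        // Hypothetical sketch; MirrorSourceTaskTest's real helper may differ
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < expected.size(); i++) {
            Header kafkaHeader = expected.get(i);
            org.apache.kafka.connect.header.Header connectHeader = actual.get(i);
            assertEquals(kafkaHeader.key(), connectHeader.key());
            // Assumption: the Connect header value is the original byte[]
            assertArrayEquals(kafkaHeader.value(), (byte[]) connectHeader.value());
        }
    }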

Example 14 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics.

@Test
public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() {
    setup(false);
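    // ProcessorRecordContext arguments are (timestamp, offset, partition, topic, headers),
    // so the log assertion below expects offset=[-2] and partition=[-3]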
    context.setRecordContext(new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        processor.process(new Record<>(null, "1", 0L));
        assertThat(appender.getEvents().stream().filter(e -> e.getLevel().equals("WARN")).map(Event::getMessage).collect(Collectors.toList()), hasItem("Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2]"));
    }
    assertEquals(1.0, getMetricByName(context.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue());
}
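
RecordHeaders, passed empty above, implements the org.apache.kafka.common.header.Headers interface. A minimal sketch of its mutation and lookup API:

    RecordHeaders headers = new RecordHeaders();
    // add(String, byte[]) and add(Header) both append; duplicate keys are allowed
    headers.add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));
    headers.add(new RecordHeader("retry", new byte[] { 1 }));
    // lastHeader returns the most recently added header for a key, or null
    Header last = headers.lastHeader("trace-id");
    // remove deletes every header with the given key
    headers.remove("retry");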

Example 15 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class NamedCacheTest, method shouldThrowIllegalStateExceptionWhenTryingToOverwriteDirtyEntryWithCleanEntry.

@Test
public void shouldThrowIllegalStateExceptionWhenTryingToOverwriteDirtyEntryWithCleanEntry() {
    cache.put(Bytes.wrap(new byte[] { 0 }), new LRUCacheEntry(new byte[] { 10 }, headers, true, 0, 0, 0, ""));
    assertThrows(IllegalStateException.class, () -> cache.put(Bytes.wrap(new byte[] { 0 }), new LRUCacheEntry(new byte[] { 10 }, new RecordHeaders(), false, 0, 0, 0, "")));
}
