
Example 11 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

From the class ConsumerRecordTest, method testConstructorsWithChecksum.

@Test
@Deprecated
public void testConstructorsWithChecksum() {
    String topic = "topic";
    int partition = 0;
    long offset = 23;
    long timestamp = 23434217432432L;
    TimestampType timestampType = TimestampType.CREATE_TIME;
    String key = "key";
    String value = "value";
    long checksum = 50L;
    int serializedKeySize = 100;
    int serializedValueSize = 1142;
    ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(new RecordHeaders(), record.headers());
    RecordHeaders headers = new RecordHeaders();
    headers.add(new RecordHeader("header key", "header value".getBytes(StandardCharsets.UTF_8)));
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value, headers);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(headers, record.headers());
    Optional<Integer> leaderEpoch = Optional.of(10);
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value, headers, leaderEpoch);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(leaderEpoch, record.leaderEpoch());
    assertEquals(headers, record.headers());
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), TimestampType(org.apache.kafka.common.record.TimestampType), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Test(org.junit.jupiter.api.Test)
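A note on the assertions above: they rely on RecordHeaders and RecordHeader implementing value-based equals(), so two instances holding the same key/value pairs in the same order compare equal. A minimal, self-contained sketch of that behavior (the class name HeaderEqualityDemo is ours, not from the Kafka test):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderEqualityDemo {
    public static void main(String[] args) {
        RecordHeaders a = new RecordHeaders();
        RecordHeaders b = new RecordHeaders();
        a.add(new RecordHeader("header key", "header value".getBytes(StandardCharsets.UTF_8)));
        b.add(new RecordHeader("header key", "header value".getBytes(StandardCharsets.UTF_8)));
        // Equality is value-based: same keys and byte values in the same order.
        System.out.println(a.equals(b)); // true
        // Header values are raw bytes; decode explicitly when reading them back.
        Header h = a.lastHeader("header key");
        System.out.println(new String(h.value(), StandardCharsets.UTF_8)); // header value
    }
}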

Example 12 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

From the class MirrorSourceTaskTest, method testPoll.

@Test
public void testPoll() {
    // Create a consumer mock
    byte[] key1 = "abc".getBytes();
    byte[] value1 = "fgh".getBytes();
    byte[] key2 = "123".getBytes();
    byte[] value2 = "456".getBytes();
    List<ConsumerRecord<byte[], byte[]>> consumerRecordsList = new ArrayList<>();
    String topicName = "test";
    String headerKey = "key";
    RecordHeaders headers = new RecordHeaders(new Header[] { new RecordHeader(headerKey, "value".getBytes()) });
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 0, 0, System.currentTimeMillis(), TimestampType.CREATE_TIME, key1.length, value1.length, key1, value1, headers, Optional.empty()));
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 1, 1, System.currentTimeMillis(), TimestampType.CREATE_TIME, key2.length, value2.length, key2, value2, headers, Optional.empty()));
    ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(topicName, 0), consumerRecordsList));
    @SuppressWarnings("unchecked") KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class);
    when(consumer.poll(any())).thenReturn(consumerRecords);
    MirrorMetrics metrics = mock(MirrorMetrics.class);
    String sourceClusterName = "cluster1";
    ReplicationPolicy replicationPolicy = new DefaultReplicationPolicy();
    MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(consumer, metrics, sourceClusterName, replicationPolicy, 50);
    List<SourceRecord> sourceRecords = mirrorSourceTask.poll();
    assertEquals(2, sourceRecords.size());
    for (int i = 0; i < sourceRecords.size(); i++) {
        SourceRecord sourceRecord = sourceRecords.get(i);
        ConsumerRecord<byte[], byte[]> consumerRecord = consumerRecordsList.get(i);
        assertEquals(consumerRecord.key(), sourceRecord.key(), "consumerRecord key does not equal sourceRecord key");
        assertEquals(consumerRecord.value(), sourceRecord.value(), "consumerRecord value does not equal sourceRecord value");
        // We expect the topic name to be derived from the replication policy in use
        assertEquals(replicationPolicy.formatRemoteTopic(sourceClusterName, topicName), sourceRecord.topic(), "topicName not the same as the current replicationPolicy");
        // We expect that MirrorMaker will keep the same partition assignment
        assertEquals(consumerRecord.partition(), sourceRecord.kafkaPartition().intValue(), "partition assignment not the same as the current replicationPolicy");
        // Check header values
        List<Header> expectedHeaders = new ArrayList<>();
        consumerRecord.headers().forEach(expectedHeaders::add);
        List<org.apache.kafka.connect.header.Header> taskHeaders = new ArrayList<>();
        sourceRecord.headers().forEach(taskHeaders::add);
        compareHeaders(expectedHeaders, taskHeaders);
    }
}
Also used: ArrayList(java.util.ArrayList), ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords), SourceRecord(org.apache.kafka.connect.source.SourceRecord), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord), RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), Header(org.apache.kafka.common.header.Header), TopicPartition(org.apache.kafka.common.TopicPartition), Test(org.junit.jupiter.api.Test)
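The compareHeaders helper called at the end of the loop is not shown in this excerpt. A plausible sketch of what such a check might do, assuming the Connect header values arrive as raw byte arrays (both the helper body and that assumption are ours; the real helper in the Kafka test may differ):

import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.header.Header;

final class HeaderAssertions {
    // Hypothetical: mirrors what a compareHeaders helper might verify.
    static void compareHeaders(List<Header> expected,
                               List<org.apache.kafka.connect.header.Header> actual) {
        if (expected.size() != actual.size())
            throw new AssertionError("header counts differ");
        for (int i = 0; i < expected.size(); i++) {
            Header e = expected.get(i);
            org.apache.kafka.connect.header.Header a = actual.get(i);
            if (!e.key().equals(a.key()))
                throw new AssertionError("header keys differ at index " + i);
            // Assumption: the Connect header value is the raw byte[] payload.
            if (!Arrays.equals(e.value(), (byte[]) a.value()))
                throw new AssertionError("header values differ at index " + i);
        }
    }
}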

Example 13 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

From the class FetcherTest, method testHeaders.

@Test
public void testHeaders() {
    buildFetcher();
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());
    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);
    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);
    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, memoryRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    records = recordsByPartition.get(tp0);
    assertEquals(3, records.size());
    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
Also used: ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Header(org.apache.kafka.common.header.Header), TopicPartition(org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder), Collections.singletonList(java.util.Collections.singletonList), Arrays.asList(java.util.Arrays.asList), ArrayList(java.util.ArrayList), Collections.emptyList(java.util.Collections.emptyList), List(java.util.List), MemoryRecords(org.apache.kafka.common.record.MemoryRecords), Test(org.junit.jupiter.api.Test)
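The key API at play here is Headers.lastHeader(String), which returns the most recently added header for a key, while Headers.headers(String) iterates every header with that key in insertion order. A small standalone sketch (the class name LastHeaderDemo is ours):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class LastHeaderDemo {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add(new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8)));
        headers.add(new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8)));
        // lastHeader returns the most recently added header with that key...
        System.out.println(new String(headers.lastHeader("headerKey").value(), StandardCharsets.UTF_8)); // headerValue2
        // ...while headers(key) iterates all headers with that key, in insertion order.
        for (Header h : headers.headers("headerKey"))
            System.out.println(new String(h.value(), StandardCharsets.UTF_8));
    }
}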

Example 14 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

From the class RecordCollectorTest, method shouldSendWithNoPartition.

@Test
public void shouldSendWithNoPartition() {
    final Headers headers = new RecordHeaders(new Header[] { new RecordHeader("key", "value".getBytes()) });
    collector.send(topic, "3", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "9", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "27", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "81", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "243", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "28", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "82", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "244", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "245", "0", headers, null, null, stringSerializer, stringSerializer);
    final Map<TopicPartition, Long> offsets = collector.offsets();
    // With the mock producer and no explicit partition, the default producer partitioner (a murmur2 hash of the serialized key) chooses the partition
    assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(9, mockProducer.history().size());
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), Headers(org.apache.kafka.common.header.Headers), TopicPartition(org.apache.kafka.common.TopicPartition), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Test(org.junit.Test)
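For keyed records with no explicit partition, Kafka's default partitioner has historically computed toPositive(murmur2(serializedKey)) % numPartitions, which is what produces the 4/3/2 split across the three partitions asserted above (last offsets 3, 2, and 1). A sketch of that computation using the public Utils helpers; the exact per-key outputs depend on the murmur2 hash, so we print rather than assert them (the class name KeyPartitionDemo is ours):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.utils.Utils;

public class KeyPartitionDemo {
    public static void main(String[] args) {
        int numPartitions = 3; // the test asserts records across partitions 0..2
        for (String key : new String[] {"3", "9", "27", "81", "243", "28", "82", "244", "245"}) {
            byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8); // StringSerializer output
            // Keyed records hash to a partition via murmur2 modulo the partition count.
            int partition = Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
            System.out.println(key + " -> partition " + partition);
        }
    }
}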

Example 15 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

From the class RecordCollectorTest, method shouldSendToSpecificPartition.

@Test
public void shouldSendToSpecificPartition() {
    final Headers headers = new RecordHeaders(new Header[] { new RecordHeader("key", "value".getBytes()) });
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 1, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 1, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 2, null, stringSerializer, stringSerializer);
    Map<TopicPartition, Long> offsets = collector.offsets();
    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(0L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(6, mockProducer.history().size());
    collector.send(topic, "999", "0", null, 0, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", null, 1, null, stringSerializer, stringSerializer);
    collector.send(topic, "999", "0", headers, 2, null, stringSerializer, stringSerializer);
    offsets = collector.offsets();
    assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(9, mockProducer.history().size());
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), Headers(org.apache.kafka.common.header.Headers), TopicPartition(org.apache.kafka.common.TopicPartition), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Test(org.junit.Test)
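When a record carries an explicit partition, the partitioner is bypassed entirely, which is why this test can predict the per-partition offsets exactly. A minimal sketch of the same idea against MockProducer, sending a ProducerRecord with an explicit partition plus headers (the class name ExplicitPartitionDemo is ours):

import java.util.List;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.StringSerializer;

public class ExplicitPartitionDemo {
    public static void main(String[] args) {
        // autoComplete = true completes each send immediately.
        MockProducer<String, String> producer =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        RecordHeaders headers = new RecordHeaders();
        headers.add(new RecordHeader("key", "value".getBytes()));
        // An explicit partition in the ProducerRecord bypasses the partitioner.
        producer.send(new ProducerRecord<>("topic", 2, "999", "0", headers));
        List<ProducerRecord<String, String>> history = producer.history();
        System.out.println(history.get(0).partition()); // 2
        System.out.println(history.get(0).headers().lastHeader("key"));
    }
}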

Aggregations

RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 45
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 26
Header (org.apache.kafka.common.header.Header): 21
Test (org.junit.Test): 17
Headers (org.apache.kafka.common.header.Headers): 16
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 15
ByteBuffer (java.nio.ByteBuffer): 10
Test (org.junit.jupiter.api.Test): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 8
ArrayList (java.util.ArrayList): 7
DataOutputStream (java.io.DataOutputStream): 6
ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream): 6
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 5
RecordBatchingStateRestoreCallback (org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback): 5
Position (org.apache.kafka.streams.query.Position): 5
MockInternalProcessorContext (org.apache.kafka.test.MockInternalProcessorContext): 5
LinkedList (java.util.LinkedList): 4
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 4
Change (org.apache.kafka.streams.kstream.internals.Change): 4
Eviction (org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction): 4