
Example 1 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project apache-kafka-on-k8s by banzaicloud.

From class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooLow:

@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooLow() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // corrupt the header-count byte: the varint 4 zigzag-decodes to 2, fewer than the 3 headers written
    buffer.put(14, (byte) 4);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
}
Also used : Header(org.apache.kafka.common.header.Header) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) DataOutputStream(java.io.DataOutputStream) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
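
Both invalid-header-count tests work by overwriting the byte that holds the record's header count, which DefaultRecord stores as a zigzag-encoded varint. A standalone sketch of that arithmetic (the helper names are mine; only the math mirrors Kafka's varint encoding):

// Sketch only: helper names are hypothetical, the math matches zigzag varints.
public class ZigZagSketch {
    static int zigZagEncode(int n) { return (n << 1) ^ (n >> 31); }
    static int zigZagDecode(int n) { return (n >>> 1) ^ -(n & 1); }

    public static void main(String[] args) {
        System.out.println(zigZagEncode(3)); // 6, the byte originally written for 3 headers
        System.out.println(zigZagDecode(4)); // 2, what the "too low" test's corrupted byte decodes to
        System.out.println(zigZagDecode(8)); // 4, what the "too high" test's corrupted byte decodes to
    }
}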

Example 2 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project apache-kafka-on-k8s by banzaicloud.

From class DefaultRecordTest, method testBasicSerde:

@Test
public void testBasicSerde() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord[] records = new SimpleRecord[] {
        new SimpleRecord("hi".getBytes(), "there".getBytes()),
        new SimpleRecord(null, "there".getBytes()),
        new SimpleRecord("hi".getBytes(), null),
        new SimpleRecord(null, null),
        new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers)
    };
    for (SimpleRecord record : records) {
        int baseSequence = 723;
        long baseOffset = 37;
        int offsetDelta = 10;
        long baseTimestamp = System.currentTimeMillis();
        long timestampDelta = 323;
        ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
        DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
        ByteBuffer buffer = out.buffer();
        buffer.flip();
        DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
        assertNotNull(logRecord);
        assertEquals(baseOffset + offsetDelta, logRecord.offset());
        assertEquals(baseSequence + offsetDelta, logRecord.sequence());
        assertEquals(baseTimestamp + timestampDelta, logRecord.timestamp());
        assertEquals(record.key(), logRecord.key());
        assertEquals(record.value(), logRecord.value());
        assertArrayEquals(record.headers(), logRecord.headers());
        assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
    }
}
Also used : Header(org.apache.kafka.common.header.Header) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) DataOutputStream(java.io.DataOutputStream) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
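
The tests above construct Header[] arrays by hand; application code usually goes through RecordHeaders, the mutable Headers implementation in the same internals package. A minimal sketch using its add, lastHeader, and toArray methods:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("foo", "value".getBytes());              // convenience overload
        headers.add(new RecordHeader("bar", (byte[]) null)); // null values are legal
        Header[] asArray = headers.toArray();                // same shape as the Header[] above
        System.out.println(asArray.length);                  // 2
        System.out.println(headers.lastHeader("foo").key()); // foo
    }
}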

Example 3 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project apache-kafka-on-k8s by banzaicloud.

From class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooHigh:

@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooHigh() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // corrupt the header-count byte: the varint 8 zigzag-decodes to 4, more than the 3 headers written
    buffer.put(14, (byte) 8);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
}
Also used : Header(org.apache.kafka.common.header.Header) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) DataOutputStream(java.io.DataOutputStream) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)

Example 4 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testHeaders:

@Test
public void testHeaders() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());
    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);
    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);
    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tp0, 1), fullFetchResponse(tp0, memoryRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(3, records.size());
    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    // the first record was appended without headers, so lastHeader returns null
    assertNull(record.headers().lastHeader("headerKey"));
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
Also used : Metrics(org.apache.kafka.common.metrics.Metrics) Header(org.apache.kafka.common.header.Header) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ResponseHeader(org.apache.kafka.common.requests.ResponseHeader) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.Test)
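
testHeaders verifies headers on the consumer side only. For completeness, a minimal sketch of how such headers are attached when producing, via the ProducerRecord constructor that accepts an Iterable<Header> (the topic name and values here are hypothetical):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;

public class ProducerHeadersSketch {
    public static void main(String[] args) {
        // two headers with the same key, mirroring headersArray2 in the test
        Iterable<Header> headers = Arrays.<Header>asList(
                new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8)),
                new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8)));
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                "some-topic", null, "key".getBytes(), "value-3".getBytes(), headers);
        System.out.println(record.headers().lastHeader("headerKey").key()); // headerKey
    }
}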

Example 5 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

From class FileRecordsTest, method doTestConversion:

private void doTestConversion(CompressionType compressionType, byte toMagic) throws IOException {
    List<Long> offsets = asList(0L, 2L, 3L, 9L, 11L, 15L, 16L, 17L, 22L, 24L);
    Header[] headers = {
        new RecordHeader("headerKey1", "headerValue1".getBytes()),
        new RecordHeader("headerKey2", "headerValue2".getBytes()),
        new RecordHeader("headerKey3", "headerValue3".getBytes())
    };
    List<SimpleRecord> records = asList(
        new SimpleRecord(1L, "k1".getBytes(), "hello".getBytes()),
        new SimpleRecord(2L, "k2".getBytes(), "goodbye".getBytes()),
        new SimpleRecord(3L, "k3".getBytes(), "hello again".getBytes()),
        new SimpleRecord(4L, "k4".getBytes(), "goodbye for now".getBytes()),
        new SimpleRecord(5L, "k5".getBytes(), "hello again".getBytes()),
        new SimpleRecord(6L, "k6".getBytes(), "I sense indecision".getBytes()),
        new SimpleRecord(7L, "k7".getBytes(), "what now".getBytes()),
        new SimpleRecord(8L, "k8".getBytes(), "running out".getBytes(), headers),
        new SimpleRecord(9L, "k9".getBytes(), "ok, almost done".getBytes()),
        new SimpleRecord(10L, "k10".getBytes(), "finally".getBytes(), headers));
    assertEquals(offsets.size(), records.size(), "incorrect test setup");
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V0, compressionType, TimestampType.CREATE_TIME, 0L);
    for (int i = 0; i < 3; i++) builder.appendWithOffset(offsets.get(i), records.get(i));
    builder.close();
    builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, compressionType, TimestampType.CREATE_TIME, 0L);
    for (int i = 3; i < 6; i++) builder.appendWithOffset(offsets.get(i), records.get(i));
    builder.close();
    builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, compressionType, TimestampType.CREATE_TIME, 0L);
    for (int i = 6; i < 10; i++) builder.appendWithOffset(offsets.get(i), records.get(i));
    builder.close();
    buffer.flip();
    try (FileRecords fileRecords = FileRecords.open(tempFile())) {
        fileRecords.append(MemoryRecords.readableRecords(buffer));
        fileRecords.flush();
        downConvertAndVerifyRecords(records, offsets, fileRecords, compressionType, toMagic, 0L, time);
        if (toMagic <= RecordBatch.MAGIC_VALUE_V1 && compressionType == CompressionType.NONE) {
            long firstOffset;
            if (toMagic == RecordBatch.MAGIC_VALUE_V0)
                // v1 record
                firstOffset = 11L;
            else
                // v2 record
                firstOffset = 17L;
            List<Long> filteredOffsets = new ArrayList<>(offsets);
            List<SimpleRecord> filteredRecords = new ArrayList<>(records);
            int index = filteredOffsets.indexOf(firstOffset) - 1;
            filteredRecords.remove(index);
            filteredOffsets.remove(index);
            downConvertAndVerifyRecords(filteredRecords, filteredOffsets, fileRecords, compressionType, toMagic, firstOffset, time);
        } else {
            // firstOffset doesn't have any effect in this case
            downConvertAndVerifyRecords(records, offsets, fileRecords, compressionType, toMagic, 10L, time);
        }
    }
}
Also used : ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong)
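
doTestConversion cycles through the three record-format magic values; note that headers only exist in the v2 format, so down-converting to v0 or v1 necessarily drops them. A tiny sketch of the constants the test branches on:

import org.apache.kafka.common.record.RecordBatch;

public class MagicValuesSketch {
    public static void main(String[] args) {
        System.out.println(RecordBatch.MAGIC_VALUE_V0); // 0
        System.out.println(RecordBatch.MAGIC_VALUE_V1); // 1
        System.out.println(RecordBatch.MAGIC_VALUE_V2); // 2, the first format that carries headers
    }
}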

Aggregations

RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 44 usages
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 25 usages
Header (org.apache.kafka.common.header.Header): 21 usages
Test (org.junit.Test): 17 usages
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 15 usages
Headers (org.apache.kafka.common.header.Headers): 15 usages
ByteBuffer (java.nio.ByteBuffer): 10 usages
Test (org.junit.jupiter.api.Test): 10 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 8 usages
ArrayList (java.util.ArrayList): 7 usages
DataOutputStream (java.io.DataOutputStream): 6 usages
ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream): 6 usages
Position (org.apache.kafka.streams.query.Position): 6 usages
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 5 usages
RecordBatchingStateRestoreCallback (org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback): 5 usages
MockInternalProcessorContext (org.apache.kafka.test.MockInternalProcessorContext): 5 usages
LinkedList (java.util.LinkedList): 4 usages
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 4 usages
Change (org.apache.kafka.streams.kstream.internals.Change): 4 usages
Eviction (org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction): 4 usages