
Example 1 with Header

Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.

From class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooLow:

@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooLow() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // corrupt the header-count varint: zigzag-encoded 4 decodes to 2,
    // one fewer than the three headers actually written
    buffer.put(14, (byte) 4);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
}
Also used: Header (org.apache.kafka.common.header.Header), RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), DataOutputStream (java.io.DataOutputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
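
For orientation, a minimal standalone sketch of the Header construction pattern these tests share (the class name HeaderBasics is made up for illustration); note that a null header value is legal and must survive the serde round trip:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;

public class HeaderBasics {
    public static void main(String[] args) {
        // RecordHeader is the standard Header implementation; a null value is allowed
        Header[] headers = new Header[] {
            new RecordHeader("foo", "value".getBytes(StandardCharsets.UTF_8)),
            new RecordHeader("bar", (byte[]) null)
        };
        for (Header h : headers) {
            byte[] v = h.value();
            System.out.println(h.key() + " -> " + (v == null ? "null" : new String(v, StandardCharsets.UTF_8)));
        }
    }
}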

Example 2 with Header

Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.

From class DefaultRecordTest, method testBasicSerde:

@Test
public void testBasicSerde() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord[] records = new SimpleRecord[] {
        new SimpleRecord("hi".getBytes(), "there".getBytes()),
        new SimpleRecord(null, "there".getBytes()),
        new SimpleRecord("hi".getBytes(), null),
        new SimpleRecord(null, null),
        new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers)
    };
    for (SimpleRecord record : records) {
        int baseSequence = 723;
        long baseOffset = 37;
        int offsetDelta = 10;
        long baseTimestamp = System.currentTimeMillis();
        long timestampDelta = 323;
        ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
        DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
        ByteBuffer buffer = out.buffer();
        buffer.flip();
        DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
        assertNotNull(logRecord);
        assertEquals(baseOffset + offsetDelta, logRecord.offset());
        assertEquals(baseSequence + offsetDelta, logRecord.sequence());
        assertEquals(baseTimestamp + timestampDelta, logRecord.timestamp());
        assertEquals(record.key(), logRecord.key());
        assertEquals(record.value(), logRecord.value());
        assertArrayEquals(record.headers(), logRecord.headers());
        assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
    }
}
Also used: Header (org.apache.kafka.common.header.Header), RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), DataOutputStream (java.io.DataOutputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
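
The same headers attach to outgoing messages either through the ProducerRecord constructor that accepts an Iterable<Header>, or through Headers.add after construction. A minimal sketch, assuming a hypothetical topic and header names:

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;

// attach a header at construction time...
ProducerRecord<String, String> record = new ProducerRecord<>(
    "my-topic", null, "key", "value",
    Collections.<Header>singletonList(new RecordHeader("trace-id", "abc123".getBytes(StandardCharsets.UTF_8))));
// ...or afterwards, any time before the record is handed to send()
record.headers().add("retry-count", "0".getBytes(StandardCharsets.UTF_8));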

Example 3 with Header

Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.

From class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooHigh:

@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooHigh() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // corrupt the header-count varint: zigzag-encoded 8 decodes to 4,
    // one more than the three headers actually written
    buffer.put(14, (byte) 8);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()), logRecord.sizeInBytes());
}
Also used: Header (org.apache.kafka.common.header.Header), RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), DataOutputStream (java.io.DataOutputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
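
Why bytes 4 and 8? The record format stores the header count as a zigzag-encoded varint (as in ByteUtils in org.apache.kafka.common.utils), so a small count n occupies a single byte of value 2*n. A sketch of the encoding:

// zigzag maps signed ints onto unsigned so small magnitudes stay one varint byte
static int zigzag(int n) {
    return (n << 1) ^ (n >> 31);
}
// zigzag(3) == 6 -> the honest count for the three headers written above
// zigzag(2) == 4 -> the byte the "too low" test plants, claiming 2 headers
// zigzag(4) == 8 -> the byte the "too high" test plants, claiming 4 headers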

Example 4 with Header

Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testHeaders:

@Test
public void testHeaders() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());
    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);
    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);
    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tp0, 1), fullFetchResponse(tp0, memoryRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(3, records.size());
    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    // the first record was appended without headers, so there is nothing to find
    assertNull(record.headers().lastHeader("headerKey"));
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), Header (org.apache.kafka.common.header.Header), RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), ResponseHeader (org.apache.kafka.common.requests.ResponseHeader), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
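
lastHeader returns only the most recently added header for a key. To see every value for a duplicated key, as with value-3 above, Headers also exposes an iterator filtered by key; a short sketch, given a ConsumerRecord<byte[], byte[]> record as in the test:

// headers("headerKey") yields matches in insertion order: "headerValue", then "headerValue2"
for (Header header : record.headers().headers("headerKey")) {
    System.out.println(header.key() + " = " + new String(header.value(), StandardCharsets.UTF_8));
}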

Example 5 with Header

Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.

From class KafkaProducer, method doSend:

/**
 * Implementation of asynchronously sending a record to a topic.
 */
private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
    TopicPartition tp = null;
    try {
        // first make sure the metadata for the topic is available
        ClusterAndWaitTime clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs);
        long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
        Cluster cluster = clusterAndWaitTime.cluster;
        byte[] serializedKey;
        try {
            serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + " specified in key.serializer", cce);
        }
        byte[] serializedValue;
        try {
            serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + " specified in value.serializer", cce);
        }
        int partition = partition(record, serializedKey, serializedValue, cluster);
        tp = new TopicPartition(record.topic(), partition);
        setReadOnly(record.headers());
        Header[] headers = record.headers().toArray();
        int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(), compressionType, serializedKey, serializedValue, headers);
        ensureValidRecordSize(serializedSize);
        long timestamp = record.timestamp() == null ? time.milliseconds() : record.timestamp();
        log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
        // producer callback will make sure to call both 'callback' and interceptor callback
        Callback interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp);
        if (transactionManager != null && transactionManager.isTransactional())
            transactionManager.maybeAddPartitionToTransaction(tp);
        RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey, serializedValue, headers, interceptCallback, remainingWaitMs);
        if (result.batchIsFull || result.newBatchCreated) {
            log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
            this.sender.wakeup();
        }
        return result.future;
    // handle exceptions and record the errors;
    // for API exceptions return them in the future,
    // for other exceptions throw directly
    } catch (ApiException e) {
        log.debug("Exception occurred during message send:", e);
        if (callback != null)
            callback.onCompletion(null, e);
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        return new FutureFailure(e);
    } catch (InterruptedException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw new InterruptException(e);
    } catch (BufferExhaustedException e) {
        this.errors.record();
        this.metrics.sensor("buffer-exhausted-records").record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (KafkaException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (Exception e) {
        // we notify interceptor about all exceptions, since onSend is called before anything else in this method
        this.interceptors.onSendError(record, tp, e);
        throw e;
    }
}
Also used: SerializationException (org.apache.kafka.common.errors.SerializationException), InterruptException (org.apache.kafka.common.errors.InterruptException), Cluster (org.apache.kafka.common.Cluster), RecordAccumulator (org.apache.kafka.clients.producer.internals.RecordAccumulator), KafkaException (org.apache.kafka.common.KafkaException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), AuthenticationException (org.apache.kafka.common.errors.AuthenticationException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), AuthorizationException (org.apache.kafka.common.errors.AuthorizationException), ConfigException (org.apache.kafka.common.config.ConfigException), ExecutionException (java.util.concurrent.ExecutionException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), ApiException (org.apache.kafka.common.errors.ApiException), OffsetCommitCallback (org.apache.kafka.clients.consumer.OffsetCommitCallback), Header (org.apache.kafka.common.header.Header), TopicPartition (org.apache.kafka.common.TopicPartition)
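
Note the setReadOnly(record.headers()) call above: once doSend has accepted a record, its headers can no longer be mutated. A minimal end-to-end sketch, with a hypothetical broker address, topic, and header name:

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

KafkaProducer<String, String> producer = new KafkaProducer<>(props);
ProducerRecord<String, String> record = new ProducerRecord<>("my-topic", "key", "value");
record.headers().add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));
producer.send(record);
// the headers are now read-only; a further add(...) throws IllegalStateException
producer.close();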

Aggregations

Header (org.apache.kafka.common.header.Header): 13
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 9
Test (org.junit.Test): 6
ByteBuffer (java.nio.ByteBuffer): 5
TopicPartition (org.apache.kafka.common.TopicPartition): 4
DataOutputStream (java.io.DataOutputStream): 3
ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream): 3
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 2
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 2
BufferUnderflowException (java.nio.BufferUnderflowException): 1
Date (java.util.Date): 1
HashMap (java.util.HashMap): 1
List (java.util.List): 1
Map (java.util.Map): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
ConnectionContext (loghub.ConnectionContext): 1
DecodeException (loghub.Decoder.DecodeException): 1
Event (loghub.Event): 1
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 1
OffsetCommitCallback (org.apache.kafka.clients.consumer.OffsetCommitCallback): 1