Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.
From the class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooLow.
@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooLow() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // corrupt the header-count varint (at offset 14 for this record's layout):
    // the byte 4 zigzag-decodes to 2, fewer than the 3 headers actually written
    buffer.put(14, (byte) 4);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()),
        logRecord.sizeInBytes());
}
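The single-byte patch works because DefaultRecord stores the header count as a zigzag-encoded varint, which for this record's layout happens to sit at buffer offset 14. A minimal sketch of the zigzag mapping, written here for illustration (Kafka's own codec lives in org.apache.kafka.common.utils.ByteUtils):

public class ZigzagSketch {
    // zigzag interleaves signed ints onto unsigned ones: 0, -1, 1, -2, 2 -> 0, 1, 2, 3, 4
    static int zigzagEncode(int n) { return (n << 1) ^ (n >> 31); }
    static int zigzagDecode(int v) { return (v >>> 1) ^ -(v & 1); }

    public static void main(String[] args) {
        System.out.println(zigzagEncode(3)); // 6: what writeTo actually emitted for 3 headers
        System.out.println(zigzagDecode(4)); // 2: the patched byte claims only 2 headers
        System.out.println(zigzagDecode(8)); // 4: used by the "TooHigh" variant further down
    }
}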
Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.
From the class DefaultRecordTest, method testBasicSerde.
@Test
public void testBasicSerde() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord[] records = new SimpleRecord[] {
        new SimpleRecord("hi".getBytes(), "there".getBytes()),
        new SimpleRecord(null, "there".getBytes()),
        new SimpleRecord("hi".getBytes(), null),
        new SimpleRecord(null, null),
        new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers)
    };
    for (SimpleRecord record : records) {
        int baseSequence = 723;
        long baseOffset = 37;
        int offsetDelta = 10;
        long baseTimestamp = System.currentTimeMillis();
        long timestampDelta = 323;
        ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
        DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
        ByteBuffer buffer = out.buffer();
        buffer.flip();
        DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
        assertNotNull(logRecord);
        assertEquals(baseOffset + offsetDelta, logRecord.offset());
        assertEquals(baseSequence + offsetDelta, logRecord.sequence());
        assertEquals(baseTimestamp + timestampDelta, logRecord.timestamp());
        assertEquals(record.key(), logRecord.key());
        assertEquals(record.value(), logRecord.value());
        assertArrayEquals(record.headers(), logRecord.headers());
        assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()),
            logRecord.sizeInBytes());
    }
}
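The same header set can also be built with the mutable org.apache.kafka.common.header.internals.RecordHeaders collection instead of a raw array; a short sketch (variable names are mine, the API calls are real):

RecordHeaders headers = new RecordHeaders();
headers.add("foo", "value".getBytes());
// a header value may be null; a header key may not
headers.add(new RecordHeader("bar", (byte[]) null));
Header[] asArray = headers.toArray();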
Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.
From the class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooHigh.
@Test(expected = InvalidRecordException.class)
public void testBasicSerdeInvalidHeaderCountTooHigh() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", (byte[]) null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // corrupt the header-count varint: the byte 8 zigzag-decodes to 4, more than the 3 headers written
    buffer.put(14, (byte) 8);
    DefaultRecord logRecord = DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null);
    // force iteration through the record to validate the number of headers
    assertEquals(DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()),
        logRecord.sizeInBytes());
}
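To confirm what the patched byte decodes to before readFrom consumes the buffer, the varint codec that DefaultRecord uses is exposed in org.apache.kafka.common.utils.ByteUtils; a sketch (position 14 is specific to this record's layout, as in the tests above):

ByteBuffer check = buffer.duplicate();
check.position(14);
// decodes the patched byte 8 back to a claimed header count of 4 (3 were written)
int claimedHeaderCount = ByteUtils.readVarint(check);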
Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.
From the class FetcherTest, method testHeaders.
@Test
public void testHeaders() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    // first record: no headers at all
    builder.append(0L, "key".getBytes(), "value-1".getBytes());
    // second record: a single header
    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);
    // third record: two headers sharing the same key
    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);
    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tp0, 1), fullFetchResponse(tp0, memoryRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(3, records.size());
    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    // the first record was appended without headers
    assertNull(record.headers().lastHeader("headerKey"));
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    record = recordIterator.next();
    // lastHeader returns the most recently added header for the key
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
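lastHeader returns only the most recently added header for a key, which is why the third record yields "headerValue2". When a key repeats, Headers.headers(String) iterates every occurrence in insertion order; a sketch against that third record:

for (Header header : record.headers().headers("headerKey")) {
    // prints "headerValue", then "headerValue2"
    System.out.println(new String(header.value(), StandardCharsets.UTF_8));
}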
Use of org.apache.kafka.common.header.Header in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaProducer, method doSend.
/**
 * Implementation of asynchronously sending a record to a topic.
 */
private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
    TopicPartition tp = null;
    try {
        // first make sure the metadata for the topic is available
        ClusterAndWaitTime clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs);
        long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
        Cluster cluster = clusterAndWaitTime.cluster;
        byte[] serializedKey;
        try {
            serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() +
                " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() +
                " specified in key.serializer", cce);
        }
        byte[] serializedValue;
        try {
            serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() +
                " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() +
                " specified in value.serializer", cce);
        }
        int partition = partition(record, serializedKey, serializedValue, cluster);
        tp = new TopicPartition(record.topic(), partition);
        // freeze the headers: after this point Headers.add() throws IllegalStateException
        setReadOnly(record.headers());
        Header[] headers = record.headers().toArray();
        int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(),
            compressionType, serializedKey, serializedValue, headers);
        ensureValidRecordSize(serializedSize);
        long timestamp = record.timestamp() == null ? time.milliseconds() : record.timestamp();
        log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
        // producer callback will make sure to call both 'callback' and interceptor callback
        Callback interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp);
        if (transactionManager != null && transactionManager.isTransactional())
            transactionManager.maybeAddPartitionToTransaction(tp);
        RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey, serializedValue,
            headers, interceptCallback, remainingWaitMs);
        if (result.batchIsFull || result.newBatchCreated) {
            log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
            this.sender.wakeup();
        }
        return result.future;
        // handle exceptions and record the errors:
        // API exceptions are returned in the future, all other exceptions are thrown directly
    } catch (ApiException e) {
        log.debug("Exception occurred during message send:", e);
        if (callback != null)
            callback.onCompletion(null, e);
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        return new FutureFailure(e);
    } catch (InterruptedException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw new InterruptException(e);
    } catch (BufferExhaustedException e) {
        this.errors.record();
        this.metrics.sensor("buffer-exhausted-records").record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (KafkaException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (Exception e) {
        // we notify the interceptor about all exceptions, since onSend is called before anything else in this method
        this.interceptors.onSendError(record, tp, e);
        throw e;
    }
}
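On the producer side, headers travel with the ProducerRecord and stay mutable until doSend() calls setReadOnly(record.headers()); a usage sketch (topic name and header values are hypothetical):

Header[] headers = { new RecordHeader("trace-id", "abc123".getBytes(StandardCharsets.UTF_8)) };
ProducerRecord<String, String> record =
    new ProducerRecord<>("my-topic", null, "key", "value", Arrays.asList(headers));
record.headers().add("retry-count", "0".getBytes(StandardCharsets.UTF_8)); // still legal before send()
producer.send(record);
// after send() the headers are read-only; a further add() throws IllegalStateException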