Use of org.apache.kafka.common.header.internals.RecordHeader in project apache-kafka-on-k8s by banzaicloud.
From class ProducerBatchTest, method testSplitPreservesHeaders:
@Test
public void testSplitPreservesHeaders() {
    for (CompressionType compressionType : CompressionType.values()) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(
            ByteBuffer.allocate(1024), MAGIC_VALUE_V2, compressionType, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
        Header header = new RecordHeader("header-key", "header-value".getBytes());
        // Fill the batch until tryAppend signals it is full by returning null.
        while (true) {
            FutureRecordMetadata future = batch.tryAppend(
                now, "hi".getBytes(), "there".getBytes(), new Header[] { header }, null, now);
            if (future == null) {
                break;
            }
        }
        Deque<ProducerBatch> batches = batch.split(200);
        assertTrue("This batch should be split to multiple small batches.", batches.size() >= 2);
        // Every record in every split batch must still carry the original header.
        for (ProducerBatch splitProducerBatch : batches) {
            for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
                for (Record record : splitBatch) {
                    assertTrue("Header size should be 1.", record.headers().length == 1);
                    assertTrue("Header key should be 'header-key'.", record.headers()[0].key().equals("header-key"));
                    assertTrue("Header value should be 'header-value'.", new String(record.headers()[0].value()).equals("header-value"));
                }
            }
        }
    }
}
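For context, the header verified above enters a record the same way it would in ordinary client code. A minimal sketch of attaching a header through the public producer API, assuming a placeholder broker address and topic name:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.StringSerializer;

public class HeaderProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            ProducerRecord<String, String> record = new ProducerRecord<>("topic", "hi", "there");
            // Headers attached here travel with the record through batching and any batch splits.
            record.headers().add(new RecordHeader("header-key", "header-value".getBytes()));
            producer.send(record);
        }
    }
}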
Use of org.apache.kafka.common.header.internals.RecordHeader in project apache-kafka-on-k8s by banzaicloud.
From class KafkaProducerTest, method testHeaders:
@PrepareOnlyThisForTest(Metadata.class)
@Test
public void testHeaders() throws Exception {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    ExtendedSerializer keySerializer = PowerMock.createNiceMock(ExtendedSerializer.class);
    ExtendedSerializer valueSerializer = PowerMock.createNiceMock(ExtendedSerializer.class);
    KafkaProducer<String, String> producer = new KafkaProducer<>(props, keySerializer, valueSerializer);
    Metadata metadata = PowerMock.createNiceMock(Metadata.class);
    MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata);
    String topic = "topic";
    final Cluster cluster = new Cluster(
        "dummy",
        Collections.singletonList(new Node(0, "host1", 1000)),
        Arrays.asList(new PartitionInfo(topic, 0, null, null, null)),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
    EasyMock.expect(metadata.fetch()).andReturn(cluster).anyTimes();
    PowerMock.replay(metadata);
    String value = "value";
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, value);
    EasyMock.expect(keySerializer.serialize(topic, record.headers(), null)).andReturn(null).once();
    EasyMock.expect(valueSerializer.serialize(topic, record.headers(), value)).andReturn(value.getBytes()).once();
    PowerMock.replay(keySerializer);
    PowerMock.replay(valueSerializer);
    // ensure headers can be mutated pre send.
    record.headers().add(new RecordHeader("test", "header2".getBytes()));
    producer.send(record, null);
    // ensure headers are closed and cannot be mutated post send
    try {
        record.headers().add(new RecordHeader("test", "test".getBytes()));
        fail("Expected IllegalStateException to be raised");
    } catch (IllegalStateException ise) {
        // expected
    }
    // ensure existing headers are not changed, and last header for key is still original value
    assertTrue(Arrays.equals(record.headers().lastHeader("test").value(), "header2".getBytes()));
    PowerMock.verify(valueSerializer);
    PowerMock.verify(keySerializer);
}
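The mutability flip this test exercises is implemented by RecordHeaders itself, which the producer switches to read-only once the record is queued for sending. A minimal standalone sketch of that behavior, with no broker or mocks required:

import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class ReadOnlyHeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add(new RecordHeader("test", "header2".getBytes()));
        headers.setReadOnly(); // what the producer does internally once send() takes the record
        try {
            headers.add(new RecordHeader("test", "test".getBytes()));
        } catch (IllegalStateException expected) {
            // mutation is rejected, matching the assertion in the test above
        }
        System.out.println(new String(headers.lastHeader("test").value())); // prints "header2"
    }
}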
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From class DefaultRecordTest, method testBasicSerdeInvalidHeaderCountTooHigh:
@Test
public void testBasicSerdeInvalidHeaderCountTooHigh() throws IOException {
    Header[] headers = new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    };
    SimpleRecord record = new SimpleRecord(15L, "hi".getBytes(), "there".getBytes(), headers);
    int baseSequence = 723;
    long baseOffset = 37;
    int offsetDelta = 10;
    long baseTimestamp = System.currentTimeMillis();
    long timestampDelta = 323;
    ByteBufferOutputStream out = new ByteBufferOutputStream(1024);
    DefaultRecord.writeTo(new DataOutputStream(out), offsetDelta, timestampDelta, record.key(), record.value(), record.headers());
    ByteBuffer buffer = out.buffer();
    buffer.flip();
    // Overwrite the header-count varint so it claims more headers than were actually written.
    buffer.put(14, (byte) 8);
    assertThrows(InvalidRecordException.class, () -> DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, null));
}
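The single-byte patch works because DefaultRecord stores the header count as a zig-zag varint: the three real headers encode as the byte 6, and the injected byte 8 decodes to a count of 4, one more header than the buffer actually contains (offset 14 is specific to this record's key and value sizes). A small sketch of that encoding using Kafka's own ByteUtils:

import java.nio.ByteBuffer;
import org.apache.kafka.common.utils.ByteUtils;

public class VarintSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        ByteUtils.writeVarint(3, buf); // the real header count; zig-zag encodes as the single byte 6
        buf.flip();
        System.out.println(buf.get(0)); // 6
        buf.clear();
        ByteUtils.writeVarint(4, buf); // the corrupted count; zig-zag encodes as the single byte 8
        buf.flip();
        System.out.println(ByteUtils.readVarint(buf)); // 4
    }
}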
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From class ProcessorContextImpl, method logChange:
@Override
public void logChange(final String storeName, final Bytes key, final byte[] value, final long timestamp, final Position position) {
    throwUnsupportedOperationExceptionIfStandby("logChange");
    final TopicPartition changelogPartition = stateManager().registeredChangelogPartitionFor(storeName);
    final Headers headers;
    if (!consistencyEnabled) {
        headers = null;
    } else {
        // Add the vector clock to the header part of every record
        headers = new RecordHeaders();
        headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
        headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY, PositionSerde.serialize(position).array()));
    }
    collector.send(changelogPartition.topic(), key, value, headers, changelogPartition.partition(), timestamp, BYTES_KEY_SERIALIZER, BYTEARRAY_VALUE_SERIALIZER);
}
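On the read side, a changelog consumer can pull those headers back out by key via Headers.lastHeader. A minimal sketch with hypothetical key names standing in for the real constants in ChangelogRecordDeserializationHelper:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderLookupSketch {
    public static void main(String[] args) {
        // Mirror the write side above: a version marker plus a position payload under a known key.
        RecordHeaders headers = new RecordHeaders();
        headers.add(new RecordHeader("version", new byte[] { 0 }));     // hypothetical version header
        headers.add(new RecordHeader("position", new byte[] { 1, 2 })); // hypothetical position bytes
        // lastHeader returns the most recently added header for the key, or null if absent.
        Header position = headers.lastHeader("position");
        if (position != null) {
            System.out.println(position.value().length); // 2
        }
    }
}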
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From class ProcessorRecordContext, method deserialize:
public static ProcessorRecordContext deserialize(final ByteBuffer buffer) {
    final long timestamp = buffer.getLong();
    final long offset = buffer.getLong();
    final String topic;
    {
        // we believe the topic will never be null when we serialize
        final byte[] topicBytes = requireNonNull(getNullableSizePrefixedArray(buffer));
        topic = new String(topicBytes, UTF_8);
    }
    final int partition = buffer.getInt();
    final int headerCount = buffer.getInt();
    final Headers headers;
    if (headerCount == -1) {
        // keep for backward compatibility
        headers = new RecordHeaders();
    } else {
        final Header[] headerArr = new Header[headerCount];
        for (int i = 0; i < headerCount; i++) {
            final byte[] keyBytes = requireNonNull(getNullableSizePrefixedArray(buffer));
            final byte[] valueBytes = getNullableSizePrefixedArray(buffer);
            headerArr[i] = new RecordHeader(new String(keyBytes, UTF_8), valueBytes);
        }
        headers = new RecordHeaders(headerArr);
    }
    return new ProcessorRecordContext(timestamp, offset, partition, topic, headers);
}
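getNullableSizePrefixedArray implies a simple layout for each array: a four-byte length prefix, then the bytes, with -1 standing in for null. A minimal sketch of the matching write side for the header section, under that assumed layout rather than the actual ProcessorRecordContext.serialize code:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;

public class HeaderSerializeSketch {
    // Write one nullable byte array as a size prefix followed by the bytes; -1 marks null.
    static void putNullableSizePrefixedArray(final ByteBuffer buffer, final byte[] bytes) {
        if (bytes == null) {
            buffer.putInt(-1);
        } else {
            buffer.putInt(bytes.length);
            buffer.put(bytes);
        }
    }

    // Write the header count, then each key (never null) and value (possibly null),
    // in the order the deserialize method above reads them back.
    static void putHeaders(final ByteBuffer buffer, final Header[] headers) {
        buffer.putInt(headers.length);
        for (final Header header : headers) {
            putNullableSizePrefixedArray(buffer, header.key().getBytes(StandardCharsets.UTF_8));
            putNullableSizePrefixedArray(buffer, header.value());
        }
    }
}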