Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From the class ProcessorContextImplTest, method shouldSendRecordHeadersToChangelogTopicWhenConsistencyEnabled:
@Test
public void shouldSendRecordHeadersToChangelogTopicWhenConsistencyEnabled() {
    final Position position = Position.emptyPosition();
    final Headers headers = new RecordHeaders();
    // With consistency enabled, each changelog record carries a version header
    // plus the serialized Position of the writing task.
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position).array()));

    // Record the expected send on the mocked collector, then replay it.
    recordCollector.send(CHANGELOG_PARTITION.topic(), KEY_BYTES, VALUE_BYTES, headers,
        CHANGELOG_PARTITION.partition(), TIMESTAMP, BYTES_KEY_SERIALIZER, BYTEARRAY_VALUE_SERIALIZER);
    final StreamTask task = EasyMock.createNiceMock(StreamTask.class);
    replay(recordCollector, task);

    context = new ProcessorContextImpl(mock(TaskId.class), streamsConfigWithConsistencyMock(),
        stateManager, mock(StreamsMetricsImpl.class), mock(ThreadCache.class));
    context.transitionToActive(task, recordCollector, null);
    context.logChange(REGISTERED_STORE_NAME, KEY_BYTES, VALUE_BYTES, TIMESTAMP, position);
    verify(recordCollector);
}
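The headers written here can be recovered on the read side. A minimal standalone sketch of the round trip, assuming PositionSerde.deserialize is available as the inverse of the PositionSerde.serialize call above; the "position" header key is invented for this sketch, whereas the test uses the helper's CHANGELOG_POSITION_HEADER_KEY constant:

import java.nio.ByteBuffer;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.state.internals.PositionSerde;

public class PositionHeaderRoundTrip {
    public static void main(String[] args) {
        // Serialize a Position into a record header, as the changelog writer does.
        final Position position = Position.emptyPosition().withComponent("input-topic", 0, 42L);
        final Headers headers = new RecordHeaders();
        headers.add(new RecordHeader("position", PositionSerde.serialize(position).array()));

        // A reader of the changelog can look the header up and deserialize it.
        final Header header = headers.lastHeader("position");
        final Position restored = PositionSerde.deserialize(ByteBuffer.wrap(header.value()));
        System.out.println(restored.equals(position)); // true
    }
}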
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From the class RecordCollectorTest, method shouldSendWithPartitioner:
@Test
public void shouldSendWithPartitioner() {
    final Headers headers = new RecordHeaders(new Header[] {new RecordHeader("key", "value".getBytes())});

    // The stream partitioner routes each key to a partition; some sends also attach a custom header.
    collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "9", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "27", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "81", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "243", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "28", "0", headers, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "82", "0", headers, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "244", "0", headers, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send(topic, "245", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);

    // The collector tracks the highest offset sent to each partition.
    final Map<TopicPartition, Long> offsets = collector.offsets();
    assertEquals(4L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(0L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(9, mockProducer.history().size());

    // The returned offsets map should not be modifiable.
    final TopicPartition topicPartition = new TopicPartition(topic, 0);
    assertThrows(UnsupportedOperationException.class, () -> offsets.put(topicPartition, 50L));
}
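Independent of the record collector, the Headers container used above supports mutation and lookup directly. A short sketch of that usage, relying only on the public org.apache.kafka.common.header interfaces:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeadersUsage {
    public static void main(String[] args) {
        final RecordHeaders headers = new RecordHeaders();
        headers.add(new RecordHeader("trace-id", "abc123".getBytes()));
        headers.add("retry-count", "1".getBytes()); // convenience overload
        headers.add("retry-count", "2".getBytes()); // duplicate keys are allowed

        // lastHeader returns the most recently added header for the key.
        Header last = headers.lastHeader("retry-count");
        System.out.println(new String(last.value())); // 2

        // headers(key) iterates all headers for the key, in insertion order.
        for (Header h : headers.headers("retry-count")) {
            System.out.println(new String(h.value())); // 1, then 2
        }

        headers.remove("trace-id"); // removes every header with that key
    }
}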
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From the class DefaultRecord, method readHeaders:
private static Header[] readHeaders(ByteBuffer buffer, int numHeaders) {
    Header[] headers = new Header[numHeaders];
    for (int i = 0; i < numHeaders; i++) {
        // Header key: varint length followed by the key bytes; a negative length is invalid.
        int headerKeySize = ByteUtils.readVarint(buffer);
        if (headerKeySize < 0)
            throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);

        // Slice the key out of the buffer without copying, then advance past it.
        ByteBuffer headerKeyBuffer = buffer.slice();
        headerKeyBuffer.limit(headerKeySize);
        buffer.position(buffer.position() + headerKeySize);

        // Header value: a negative varint length denotes a null value.
        ByteBuffer headerValue = null;
        int headerValueSize = ByteUtils.readVarint(buffer);
        if (headerValueSize >= 0) {
            headerValue = buffer.slice();
            headerValue.limit(headerValueSize);
            buffer.position(buffer.position() + headerValueSize);
        }

        headers[i] = new RecordHeader(headerKeyBuffer, headerValue);
    }
    return headers;
}
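The read-side layout above implies the matching writer: varint key length, key bytes, varint value length (negative for a null value), value bytes. A hedged sketch of that inverse, written from the layout rather than copied from DefaultRecord's actual write path:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.utils.ByteUtils;

final class HeaderWriterSketch {
    // Write headers in the layout readHeaders above expects.
    static void writeHeaders(ByteBuffer buffer, Header[] headers) {
        for (Header header : headers) {
            byte[] key = header.key().getBytes(StandardCharsets.UTF_8);
            ByteUtils.writeVarint(key.length, buffer);
            buffer.put(key);

            byte[] value = header.value();
            if (value == null) {
                ByteUtils.writeVarint(-1, buffer); // null value encoded as negative length
            } else {
                ByteUtils.writeVarint(value.length, buffer);
                buffer.put(value);
            }
        }
    }
}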
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From the class KafkaProducerTest, method doTestHeaders:
private <T extends Serializer<String>> void doTestHeaders(Class<T> serializerClassToMock) {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    Serializer<String> keySerializer = mock(serializerClassToMock);
    Serializer<String> valueSerializer = mock(serializerClassToMock);

    long nowMs = Time.SYSTEM.milliseconds();
    String topic = "topic";
    ProducerMetadata metadata = newMetadata(0, 90000);
    metadata.add(topic, nowMs);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, nowMs);

    KafkaProducer<String, String> producer = kafkaProducer(configs, keySerializer, valueSerializer, metadata, null, null, Time.SYSTEM);
    when(keySerializer.serialize(any(), any(), any())).then(invocation -> invocation.<String>getArgument(2).getBytes());
    when(valueSerializer.serialize(any(), any(), any())).then(invocation -> invocation.<String>getArgument(2).getBytes());

    String value = "value";
    String key = "key";
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, key, value);

    // Ensure headers can be mutated before the send.
    record.headers().add(new RecordHeader("test", "header2".getBytes()));
    producer.send(record, null);

    // Ensure headers are closed and cannot be mutated after the send.
    assertThrows(IllegalStateException.class, () -> record.headers().add(new RecordHeader("test", "test".getBytes())));

    // Ensure existing headers are unchanged: the last header for the key still has its original value.
    assertArrayEquals(record.headers().lastHeader("test").value(), "header2".getBytes());

    // The header-aware serialize overload should receive the record's headers.
    verify(valueSerializer).serialize(topic, record.headers(), value);
    verify(keySerializer).serialize(topic, record.headers(), key);
    producer.close(Duration.ofMillis(0));
}
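The three-argument serialize that the mocks stub here is a default method on org.apache.kafka.common.serialization.Serializer, so custom serializers can inspect headers as well. A minimal sketch of a header-aware serializer; the "uppercase" header convention is invented for illustration:

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Serializer;

public class HeaderAwareStringSerializer implements Serializer<String> {
    @Override
    public byte[] serialize(String topic, String data) {
        return data == null ? null : data.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public byte[] serialize(String topic, Headers headers, String data) {
        // Hypothetical convention: an "uppercase" header requests upper-cased output.
        if (data != null && headers.lastHeader("uppercase") != null) {
            return data.toUpperCase().getBytes(StandardCharsets.UTF_8);
        }
        return serialize(topic, data);
    }
}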
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
From the class TestRecordTest, method testEqualsAndHashCode:
@Test
public void testEqualsAndHashCode() {
    final TestRecord<String, Integer> testRecord = new TestRecord<>(key, value, headers, recordTime);

    // Reflexivity, and equality with an identically constructed record.
    assertEquals(testRecord, testRecord);
    assertEquals(testRecord.hashCode(), testRecord.hashCode());
    final TestRecord<String, Integer> equalRecord = new TestRecord<>(key, value, headers, recordTime);
    assertEquals(testRecord, equalRecord);
    assertEquals(testRecord.hashCode(), equalRecord.hashCode());

    // Constructing with the equivalent epoch-millis timestamp yields an equal record.
    final TestRecord<String, Integer> equalRecordMs = new TestRecord<>(key, value, headers, recordMs);
    assertEquals(testRecord, equalRecordMs);
    assertEquals(testRecord.hashCode(), equalRecordMs.hashCode());

    // A mismatch in any single field breaks equality.
    final Headers headers2 = new RecordHeaders(new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", null)});
    final TestRecord<String, Integer> headerMismatch = new TestRecord<>(key, value, headers2, recordTime);
    assertNotEquals(testRecord, headerMismatch);
    final TestRecord<String, Integer> keyMisMatch = new TestRecord<>("test-mismatch", value, headers, recordTime);
    assertNotEquals(testRecord, keyMisMatch);
    final TestRecord<String, Integer> valueMisMatch = new TestRecord<>(key, 2, headers, recordTime);
    assertNotEquals(testRecord, valueMisMatch);
    final TestRecord<String, Integer> timeMisMatch = new TestRecord<>(key, value, headers, recordTime.plusMillis(1));
    assertNotEquals(testRecord, timeMisMatch);

    // All-null fields are still well-defined for equals and hashCode.
    final TestRecord<String, Integer> nullFieldsRecord = new TestRecord<>(null, null, null, (Instant) null);
    assertEquals(nullFieldsRecord, nullFieldsRecord);
    assertEquals(nullFieldsRecord.hashCode(), nullFieldsRecord.hashCode());
}
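The header-mismatch assertion works because RecordHeaders itself implements value equality; as far as I can tell it compares the underlying header list, so both header contents and insertion order are significant. A short sketch of the content side of that comparison:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderEquality {
    public static void main(String[] args) {
        final Headers a = new RecordHeaders(new Header[] {new RecordHeader("foo", "value".getBytes())});
        final Headers b = new RecordHeaders(new Header[] {new RecordHeader("foo", "value".getBytes())});
        final Headers c = new RecordHeaders(new Header[] {new RecordHeader("foo", "other".getBytes())});

        System.out.println(a.equals(b)); // true: same key and same value bytes
        System.out.println(a.equals(c)); // false: value bytes differ
    }
}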