use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
the class ConsumerRecordTest method testConstructorsWithChecksum.
@Test
@Deprecated
public void testConstructorsWithChecksum() {
    String topic = "topic";
    int partition = 0;
    long offset = 23;
    long timestamp = 23434217432432L;
    TimestampType timestampType = TimestampType.CREATE_TIME;
    String key = "key";
    String value = "value";
    long checksum = 50L;
    int serializedKeySize = 100;
    int serializedValueSize = 1142;

    // Deprecated constructor: checksum, no headers, no leader epoch.
    ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(new RecordHeaders(), record.headers());

    // Deprecated constructor: checksum plus explicit headers.
    RecordHeaders headers = new RecordHeaders();
    headers.add(new RecordHeader("header key", "header value".getBytes(StandardCharsets.UTF_8)));
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value, headers);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(headers, record.headers());

    // Deprecated constructor: checksum, headers, and leader epoch.
    Optional<Integer> leaderEpoch = Optional.of(10);
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value, headers, leaderEpoch);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(leaderEpoch, record.leaderEpoch());
    assertEquals(headers, record.headers());
}
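The checksum-taking constructors exercised above are deprecated. For comparison, a minimal sketch of the same record built through the non-deprecated constructor (the form used in the MockConsumerTest snippet below), reusing the field values from the test; this is an illustration, not part of the original test:

// Equivalent record without the deprecated checksum argument; headers and
// leader epoch are passed explicitly (empty here).
ConsumerRecord<String, String> nonDeprecated = new ConsumerRecord<>(topic, partition, offset, timestamp,
    timestampType, serializedKeySize, serializedValueSize, key, value, new RecordHeaders(), Optional.empty());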
use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
the class MockConsumerTest method testSimpleMock.
@Test
public void testSimpleMock() {
    consumer.subscribe(Collections.singleton("test"));
    assertEquals(0, consumer.poll(Duration.ZERO).count());
    consumer.rebalance(Arrays.asList(new TopicPartition("test", 0), new TopicPartition("test", 1)));
    // Mock consumers need to seek manually since they cannot automatically reset offsets
    HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(new TopicPartition("test", 0), 0L);
    beginningOffsets.put(new TopicPartition("test", 1), 0L);
    consumer.updateBeginningOffsets(beginningOffsets);
    consumer.seek(new TopicPartition("test", 0), 0);
    ConsumerRecord<String, String> rec1 = new ConsumerRecord<>("test", 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, "key1", "value1", new RecordHeaders(), Optional.empty());
    ConsumerRecord<String, String> rec2 = new ConsumerRecord<>("test", 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, "key2", "value2", new RecordHeaders(), Optional.empty());
    consumer.addRecord(rec1);
    consumer.addRecord(rec2);
    ConsumerRecords<String, String> recs = consumer.poll(Duration.ofMillis(1));
    Iterator<ConsumerRecord<String, String>> iter = recs.iterator();
    assertEquals(rec1, iter.next());
    assertEquals(rec2, iter.next());
    assertFalse(iter.hasNext());
    final TopicPartition tp = new TopicPartition("test", 0);
    assertEquals(2L, consumer.position(tp));
    consumer.commitSync();
    assertEquals(2L, consumer.committed(Collections.singleton(tp)).get(tp).offset());
}
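The consumer field itself is not shown in this snippet; a minimal sketch of how such a MockConsumer is typically created, as an assumption (the actual field initialization in the test class may differ):

// Hypothetical setup for the consumer field used above; EARLIEST matches the
// manual seek-to-beginning pattern in the test.
private final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);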
use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
the class MirrorSourceTaskTest method testPoll.
@Test
public void testPoll() {
    // Create a consumer mock
    byte[] key1 = "abc".getBytes();
    byte[] value1 = "fgh".getBytes();
    byte[] key2 = "123".getBytes();
    byte[] value2 = "456".getBytes();
    List<ConsumerRecord<byte[], byte[]>> consumerRecordsList = new ArrayList<>();
    String topicName = "test";
    String headerKey = "key";
    RecordHeaders headers = new RecordHeaders(new Header[] { new RecordHeader(headerKey, "value".getBytes()) });
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 0, 0, System.currentTimeMillis(), TimestampType.CREATE_TIME, key1.length, value1.length, key1, value1, headers, Optional.empty()));
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 1, 1, System.currentTimeMillis(), TimestampType.CREATE_TIME, key2.length, value2.length, key2, value2, headers, Optional.empty()));
    ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(topicName, 0), consumerRecordsList));
    @SuppressWarnings("unchecked")
    KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class);
    when(consumer.poll(any())).thenReturn(consumerRecords);
    MirrorMetrics metrics = mock(MirrorMetrics.class);
    String sourceClusterName = "cluster1";
    ReplicationPolicy replicationPolicy = new DefaultReplicationPolicy();
    MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(consumer, metrics, sourceClusterName, replicationPolicy, 50);
    List<SourceRecord> sourceRecords = mirrorSourceTask.poll();
    assertEquals(2, sourceRecords.size());
    for (int i = 0; i < sourceRecords.size(); i++) {
        SourceRecord sourceRecord = sourceRecords.get(i);
        ConsumerRecord<byte[], byte[]> consumerRecord = consumerRecordsList.get(i);
        assertEquals(consumerRecord.key(), sourceRecord.key(), "consumerRecord key does not equal sourceRecord key");
        assertEquals(consumerRecord.value(), sourceRecord.value(), "consumerRecord value does not equal sourceRecord value");
        // We expect that the topic name will be based on the replication policy currently used
        assertEquals(replicationPolicy.formatRemoteTopic(sourceClusterName, topicName), sourceRecord.topic(), "topicName not the same as the current replicationPolicy");
        // We expect that MirrorMaker will keep the same partition assignment
        assertEquals(consumerRecord.partition(), sourceRecord.kafkaPartition().intValue(), "partition assignment not the same as the current replicationPolicy");
        // Check header values
        List<Header> expectedHeaders = new ArrayList<>();
        consumerRecord.headers().forEach(expectedHeaders::add);
        List<org.apache.kafka.connect.header.Header> taskHeaders = new ArrayList<>();
        sourceRecord.headers().forEach(taskHeaders::add);
        compareHeaders(expectedHeaders, taskHeaders);
    }
}
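The compareHeaders helper called at the end of the loop is not included in this snippet. A minimal sketch of what such a comparison presumably checks, assuming an order-preserving mapping from Kafka record headers to Connect headers with raw byte[] values (the helper's actual implementation may differ):

private void compareHeaders(List<Header> expectedHeaders, List<org.apache.kafka.connect.header.Header> taskHeaders) {
    // Assumes each consumer record header is mirrored one-to-one into the
    // source record's Connect headers, keeping key order and byte[] values.
    assertEquals(expectedHeaders.size(), taskHeaders.size());
    for (int i = 0; i < expectedHeaders.size(); i++) {
        Header expected = expectedHeaders.get(i);
        org.apache.kafka.connect.header.Header actual = taskHeaders.get(i);
        assertEquals(expected.key(), actual.key());
        assertArrayEquals(expected.value(), (byte[]) actual.value());
    }
}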
use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
the class KStreamSessionWindowAggregateProcessorTest method shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics.
@Test
public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() {
    setup(false);
    // ProcessorRecordContext(timestamp, offset, partition, topic, headers)
    context.setRecordContext(new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        processor.process(new Record<>(null, "1", 0L));
        assertThat(appender.getEvents().stream().filter(e -> e.getLevel().equals("WARN")).map(Event::getMessage).collect(Collectors.toList()),
            hasItem("Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2]"));
    }
    assertEquals(1.0, getMetricByName(context.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue());
}
use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
the class NamedCacheTest method shouldThrowIllegalStateExceptionWhenTryingToOverwriteDirtyEntryWithCleanEntry.
@Test
public void shouldThrowIllegalStateExceptionWhenTryingToOverwriteDirtyEntryWithCleanEntry() {
    // First put stores a dirty entry (isDirty = true) for the key...
    cache.put(Bytes.wrap(new byte[] { 0 }), new LRUCacheEntry(new byte[] { 10 }, headers, true, 0, 0, 0, ""));
    // ...so overwriting it with a clean entry (isDirty = false) must fail.
    assertThrows(IllegalStateException.class, () -> cache.put(Bytes.wrap(new byte[] { 0 }), new LRUCacheEntry(new byte[] { 10 }, new RecordHeaders(), false, 0, 0, 0, "")));
}
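The headers field used by the first put above is not defined in this snippet; a hypothetical definition consistent with how headers are built in the other examples on this page (the real test's setup may use different key/value content):

// Hypothetical headers field assumed by the snippet above.
private final Headers headers = new RecordHeaders(
    new Header[] { new RecordHeader("key", "value".getBytes(StandardCharsets.UTF_8)) });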