Use of org.apache.kafka.common.record.TimestampType in project apache-kafka-on-k8s by banzaicloud.
From class WorkerSinkTaskTest, method testTimestampPropagation.
@Test
public void testTimestampPropagation() throws Exception {
    final Long timestamp = System.currentTimeMillis();
    final TimestampType timestampType = TimestampType.CREATE_TIME;
    createTask(initialState);
    expectInitializeTask();
    expectPollInitialAssignment();
    expectConsumerPoll(1, timestamp, timestampType);
    expectConversionAndTransformation(1);
    Capture<Collection<SinkRecord>> records = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(records));
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // iter 1 -- initial assignment
    workerTask.iteration();
    // iter 2 -- deliver 1 record
    workerTask.iteration();
    SinkRecord record = records.getValue().iterator().next();
    assertEquals(timestamp, record.timestamp());
    assertEquals(timestampType, record.timestampType());
    PowerMock.verifyAll();
}
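For orientation, here is a minimal, hypothetical Connect sink task that consumes the same fields the test above asserts on: the record timestamp and its TimestampType as exposed on each SinkRecord. The class name and log output are illustrative only and appear in neither project.

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Hypothetical sink task: shows that the broker-delivered timestamp and its
// TimestampType are available on every SinkRecord handed to put().
public class TimestampLoggingSinkTask extends SinkTask {

    @Override
    public void start(Map<String, String> props) {
        // No configuration needed for this sketch.
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            TimestampType type = record.timestampType();
            Long timestamp = record.timestamp();
            if (type == TimestampType.NO_TIMESTAMP_TYPE || timestamp == null) {
                System.out.println("No timestamp for topic " + record.topic());
            } else {
                System.out.println(type + " timestamp " + timestamp + " for topic " + record.topic());
            }
        }
    }

    @Override
    public void stop() {
        // Nothing to clean up.
    }

    @Override
    public String version() {
        return "1.0";
    }
}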
Use of org.apache.kafka.common.record.TimestampType in project kafka by apache.
From class Fetcher, method parseRecord.
/**
 * Parse the record entry, deserializing the key / value fields if necessary
 */
private ConsumerRecord<K, V> parseRecord(TopicPartition partition, RecordBatch batch, Record record) {
    try {
        long offset = record.offset();
        long timestamp = record.timestamp();
        Optional<Integer> leaderEpoch = maybeLeaderEpoch(batch.partitionLeaderEpoch());
        TimestampType timestampType = batch.timestampType();
        Headers headers = new RecordHeaders(record.headers());
        ByteBuffer keyBytes = record.key();
        byte[] keyByteArray = keyBytes == null ? null : Utils.toArray(keyBytes);
        K key = keyBytes == null ? null : this.keyDeserializer.deserialize(partition.topic(), headers, keyByteArray);
        ByteBuffer valueBytes = record.value();
        byte[] valueByteArray = valueBytes == null ? null : Utils.toArray(valueBytes);
        V value = valueBytes == null ? null : this.valueDeserializer.deserialize(partition.topic(), headers, valueByteArray);
        return new ConsumerRecord<>(partition.topic(), partition.partition(), offset, timestamp, timestampType,
                keyByteArray == null ? ConsumerRecord.NULL_SIZE : keyByteArray.length,
                valueByteArray == null ? ConsumerRecord.NULL_SIZE : valueByteArray.length,
                key, value, headers, leaderEpoch);
    } catch (RuntimeException e) {
        throw new RecordDeserializationException(partition, record.offset(),
                "Error deserializing key/value for partition " + partition + " at offset " + record.offset()
                        + ". If needed, please seek past the record to continue consumption.", e);
    }
}
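On the application side, the TimestampType that parseRecord() attaches to each ConsumerRecord can be inspected after poll(). The sketch below is a hypothetical standalone consumer; the broker address, group id, and topic name are placeholder assumptions.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.serialization.StringDeserializer;

public class TimestampTypeInspector {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("group.id", "timestamp-inspector");     // assumed group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // assumed topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                // timestampType() reports whether the timestamp was set by the
                // producer (CREATE_TIME) or by the broker (LOG_APPEND_TIME).
                if (record.timestampType() == TimestampType.LOG_APPEND_TIME) {
                    System.out.println("Broker time: " + record.timestamp());
                } else if (record.timestampType() == TimestampType.CREATE_TIME) {
                    System.out.println("Producer time: " + record.timestamp());
                }
            }
        }
    }
}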
Use of org.apache.kafka.common.record.TimestampType in project kafka by apache.
From class ConsumerRecordTest, method testConstructorsWithChecksum.
@Test
@Deprecated
public void testConstructorsWithChecksum() {
    String topic = "topic";
    int partition = 0;
    long offset = 23;
    long timestamp = 23434217432432L;
    TimestampType timestampType = TimestampType.CREATE_TIME;
    String key = "key";
    String value = "value";
    long checksum = 50L;
    int serializedKeySize = 100;
    int serializedValueSize = 1142;
    ConsumerRecord<String, String> record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType,
        checksum, serializedKeySize, serializedValueSize, key, value);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(new RecordHeaders(), record.headers());

    RecordHeaders headers = new RecordHeaders();
    headers.add(new RecordHeader("header key", "header value".getBytes(StandardCharsets.UTF_8)));
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType,
        checksum, serializedKeySize, serializedValueSize, key, value, headers);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(Optional.empty(), record.leaderEpoch());
    assertEquals(headers, record.headers());

    Optional<Integer> leaderEpoch = Optional.of(10);
    record = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType,
        checksum, serializedKeySize, serializedValueSize, key, value, headers, leaderEpoch);
    assertEquals(topic, record.topic());
    assertEquals(partition, record.partition());
    assertEquals(offset, record.offset());
    assertEquals(key, record.key());
    assertEquals(value, record.value());
    assertEquals(timestampType, record.timestampType());
    assertEquals(timestamp, record.timestamp());
    assertEquals(serializedKeySize, record.serializedKeySize());
    assertEquals(serializedValueSize, record.serializedValueSize());
    assertEquals(leaderEpoch, record.leaderEpoch());
    assertEquals(headers, record.headers());
}
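Since the checksum-based constructors exercised above are deprecated (and removed in newer clients), a roughly equivalent record can be built with the constructor that takes headers and a leader epoch but no checksum. This sketch assumes a client version that provides that constructor; the wrapper class is hypothetical.

import java.nio.charset.StandardCharsets;
import java.util.Optional;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class ConsumerRecordFactoryExample {
    public static ConsumerRecord<String, String> newRecord() {
        RecordHeaders headers = new RecordHeaders();
        headers.add("header key", "header value".getBytes(StandardCharsets.UTF_8));
        // Non-deprecated constructor: no checksum argument, headers and
        // leader epoch are passed explicitly.
        return new ConsumerRecord<>(
            "topic",
            0,
            23L,
            23434217432432L,
            TimestampType.CREATE_TIME,
            100,   // serialized key size
            1142,  // serialized value size
            "key",
            "value",
            headers,
            Optional.of(10));
    }
}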
Use of org.apache.kafka.common.record.TimestampType in project beam by apache.
From class KafkaIOTest, method mkMockConsumer.
// Update mock consumer with records distributed among the given topics, each with given number
// of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(
        List<String> topics,
        int partitionsPerTopic,
        int numElements,
        OffsetResetStrategy offsetResetStrategy,
        Map<String, Object> config,
        SerializableFunction<Integer, byte[]> keyFunction,
        SerializableFunction<Integer, byte[]> valueFunction) {
    final List<TopicPartition> partitions = new ArrayList<>();
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
    Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
    for (String topic : topics) {
        List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
        for (int i = 0; i < partitionsPerTopic; i++) {
            TopicPartition tp = new TopicPartition(topic, i);
            partitions.add(tp);
            partIds.add(new PartitionInfo(topic, i, null, null, null));
            records.put(tp, new ArrayList<>());
        }
        partitionMap.put(topic, partIds);
    }
    int numPartitions = partitions.size();
    final long[] offsets = new long[numPartitions];
    long timestampStartMillis =
        (Long) config.getOrDefault(TIMESTAMP_START_MILLIS_CONFIG, LOG_APPEND_START_TIME.getMillis());
    TimestampType timestampType =
        TimestampType.forName(
            (String) config.getOrDefault(TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.toString()));
    for (int i = 0; i < numElements; i++) {
        int pIdx = i % numPartitions;
        TopicPartition tp = partitions.get(pIdx);
        byte[] key = keyFunction.apply(i);
        byte[] value = valueFunction.apply(i);
        records.get(tp).add(
            new ConsumerRecord<>(
                tp.topic(),
                tp.partition(),
                offsets[pIdx]++,
                timestampStartMillis + Duration.standardSeconds(i).getMillis(),
                timestampType,
                0,
                key.length,
                value.length,
                key,
                value));
    }
    // This is updated when reader assigns partitions.
    final AtomicReference<List<TopicPartition>> assignedPartitions =
        new AtomicReference<>(Collections.<TopicPartition>emptyList());
    final MockConsumer<byte[], byte[]> consumer =
        new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
            @Override
            public synchronized void assign(final Collection<TopicPartition> assigned) {
                super.assign(assigned);
                assignedPartitions.set(ImmutableList.copyOf(assigned));
                for (TopicPartition tp : assigned) {
                    updateBeginningOffsets(ImmutableMap.of(tp, 0L));
                    updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
                }
            }

            // Override offsetsForTimes() in order to look up the offsets by timestamp.
            @Override
            public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(
                    Map<TopicPartition, Long> timestampsToSearch) {
                return timestampsToSearch.entrySet().stream()
                    .map(e -> {
                        // In test scope, timestamp == offset.
                        long maxOffset = offsets[partitions.indexOf(e.getKey())];
                        long offset = e.getValue();
                        OffsetAndTimestamp value =
                            (offset >= maxOffset) ? null : new OffsetAndTimestamp(offset, offset);
                        return new SimpleEntry<>(e.getKey(), value);
                    })
                    .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
            }
        };
    for (String topic : topics) {
        consumer.updatePartitions(topic, partitionMap.get(topic));
    }
    // MockConsumer does not maintain any relationship between partition seek position and the
    // records added. e.g. if we add 10 records to a partition and then seek to end of the
    // partition, MockConsumer is still going to return the 10 records in next poll. It is
    // our responsibility to make sure currently enqueued records sync with partition offsets.
    // The following task will be called inside each invocation to MockConsumer.poll().
    // We enqueue only the records with the offset >= partition's current position.
    Runnable recordEnqueueTask =
        new Runnable() {
            @Override
            public void run() {
                // add all the records with offset >= current partition position.
                int recordsAdded = 0;
                for (TopicPartition tp : assignedPartitions.get()) {
                    long curPos = consumer.position(tp);
                    for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
                        if (r.offset() >= curPos) {
                            consumer.addRecord(r);
                            recordsAdded++;
                        }
                    }
                }
                if (recordsAdded == 0) {
                    if (config.get("inject.error.at.eof") != null) {
                        consumer.setException(new KafkaException("Injected error in consumer.poll()"));
                    }
                    // MockConsumer.poll(timeout) does not actually wait even when there aren't any
                    // records. Add a small wait here in order to avoid busy looping in the reader.
                    Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
                    // TODO: BEAM-4086: testUnboundedSourceWithoutBoundedWrapper() occasionally hangs
                    // without this wait. Need to look into it.
                }
                consumer.schedulePollTask(this);
            }
        };
    consumer.schedulePollTask(recordEnqueueTask);
    return consumer;
}
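The only TimestampType call in this helper is TimestampType.forName(), which maps the names used in Kafka configs such as message.timestamp.type ("CreateTime", "LogAppendTime") onto the enum. A small standalone sketch (class name hypothetical):

import org.apache.kafka.common.record.TimestampType;

public class TimestampTypeForNameDemo {
    public static void main(String[] args) {
        // forName() accepts the display names used in configs, not the enum constant names.
        TimestampType create = TimestampType.forName("CreateTime");
        TimestampType logAppend = TimestampType.forName(TimestampType.LOG_APPEND_TIME.toString());

        System.out.println(create);     // CreateTime
        System.out.println(logAppend);  // LogAppendTime

        // An unknown name results in a runtime exception rather than a null value.
        try {
            TimestampType.forName("WallClockTime");
        } catch (RuntimeException e) {
            System.out.println("Rejected unknown name: " + e.getMessage());
        }
    }
}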
Use of org.apache.kafka.common.record.TimestampType in project beam by apache.
From class KafkaTestTable, method mkMockConsumer.
private MockConsumer<byte[], byte[]> mkMockConsumer(Map<String, Object> config) {
    OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.EARLIEST;
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> kafkaRecords = new HashMap<>();
    Map<String, List<PartitionInfo>> partitionInfoMap = new HashMap<>();
    Map<String, List<TopicPartition>> partitionMap = new HashMap<>();
    // Create topic partitions
    for (String topic : this.getTopics()) {
        List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
        List<TopicPartition> topicParitions = new ArrayList<>(partitionsPerTopic);
        for (int i = 0; i < partitionsPerTopic; i++) {
            TopicPartition tp = new TopicPartition(topic, i);
            topicParitions.add(tp);
            partIds.add(new PartitionInfo(topic, i, null, null, null));
            kafkaRecords.put(tp, new ArrayList<>());
        }
        partitionInfoMap.put(topic, partIds);
        partitionMap.put(topic, topicParitions);
    }
    TimestampType timestampType =
        TimestampType.forName(
            (String) config.getOrDefault(TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.toString()));
    for (KafkaTestRecord record : this.records) {
        int partitionIndex = record.getKey().hashCode() % partitionsPerTopic;
        TopicPartition tp = partitionMap.get(record.getTopic()).get(partitionIndex);
        byte[] key = record.getKey().getBytes(UTF_8);
        byte[] value = record.getValue().toByteArray();
        kafkaRecords.get(tp).add(
            new ConsumerRecord<>(
                tp.topic(),
                tp.partition(),
                kafkaRecords.get(tp).size(),
                record.getTimeStamp(),
                timestampType,
                0,
                key.length,
                value.length,
                key,
                value));
    }
    // This is updated when reader assigns partitions.
    final AtomicReference<List<TopicPartition>> assignedPartitions =
        new AtomicReference<>(Collections.emptyList());
    final MockConsumer<byte[], byte[]> consumer =
        new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
            @Override
            public synchronized void assign(final Collection<TopicPartition> assigned) {
                Collection<TopicPartition> realPartitions =
                    assigned.stream()
                        .map(part -> partitionMap.get(part.topic()).get(part.partition()))
                        .collect(Collectors.toList());
                super.assign(realPartitions);
                assignedPartitions.set(ImmutableList.copyOf(realPartitions));
                for (TopicPartition tp : realPartitions) {
                    updateBeginningOffsets(ImmutableMap.of(tp, 0L));
                    updateEndOffsets(ImmutableMap.of(tp, (long) kafkaRecords.get(tp).size()));
                }
            }

            // Override offsetsForTimes() in order to look up the offsets by timestamp.
            @Override
            public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(
                    Map<TopicPartition, Long> timestampsToSearch) {
                return timestampsToSearch.entrySet().stream()
                    .map(e -> {
                        // In test scope, timestamp == offset.
                        long maxOffset = kafkaRecords.get(e.getKey()).size();
                        long offset = e.getValue();
                        OffsetAndTimestamp value =
                            (offset >= maxOffset) ? null : new OffsetAndTimestamp(offset, offset);
                        return new AbstractMap.SimpleEntry<>(e.getKey(), value);
                    })
                    .collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey, AbstractMap.SimpleEntry::getValue));
            }
        };
    for (String topic : getTopics()) {
        consumer.updatePartitions(topic, partitionInfoMap.get(topic));
    }
    Runnable recordEnqueueTask =
        new Runnable() {
            @Override
            public void run() {
                // add all the records with offset >= current partition position.
                int recordsAdded = 0;
                for (TopicPartition tp : assignedPartitions.get()) {
                    long curPos = consumer.position(tp);
                    for (ConsumerRecord<byte[], byte[]> r : kafkaRecords.get(tp)) {
                        if (r.offset() >= curPos) {
                            consumer.addRecord(r);
                            recordsAdded++;
                        }
                    }
                }
                if (recordsAdded == 0) {
                    if (config.get("inject.error.at.eof") != null) {
                        consumer.setException(new KafkaException("Injected error in consumer.poll()"));
                    }
                    // MockConsumer.poll(timeout) does not actually wait even when there aren't any
                    // records. Add a small wait here in order to avoid busy looping in the reader.
                    Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
                }
                consumer.schedulePollTask(this);
            }
        };
    consumer.schedulePollTask(recordEnqueueTask);
    return consumer;
}
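Both Beam helpers work around the fact that MockConsumer does not tie enqueued records to the partition position, so records are re-enqueued on every poll via schedulePollTask(). The following minimal, hypothetical standalone example (topic name and values are assumptions) shows the basic MockConsumer wiring outside Beam.

import java.time.Duration;
import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {
    public static void main(String[] args) {
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        TopicPartition tp = new TopicPartition("example-topic", 0); // assumed topic name

        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        // Records must be enqueued by the test itself; MockConsumer never
        // reconciles them with the partition position, which is why the Beam
        // helpers above re-enqueue matching records in a scheduled poll task.
        consumer.addRecord(new ConsumerRecord<>("example-topic", 0, 0L, "key", "value"));

        ConsumerRecords<String, String> polled = consumer.poll(Duration.ofMillis(10));
        polled.forEach(r -> System.out.println(r.offset() + " -> " + r.value()));
        consumer.close();
    }
}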