use of org.apache.kafka.common.TopicPartition in project kafka by apache.
the class AssignmentInfoTest method testEncodeDecode.
@Test
public void testEncodeDecode() {
    // The active-task list may contain duplicates: it holds one entry per assigned partition.
    List<TaskId> activeTasks = Arrays.asList(new TaskId(0, 0), new TaskId(0, 0), new TaskId(0, 1), new TaskId(1, 0));
    Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    standbyTasks.put(new TaskId(1, 1), Utils.mkSet(new TopicPartition("t1", 1), new TopicPartition("t2", 1)));
    standbyTasks.put(new TaskId(2, 0), Utils.mkSet(new TopicPartition("t3", 0), new TopicPartition("t3", 0)));
    // Round trip: encoding and then decoding must yield an equal AssignmentInfo.
    AssignmentInfo info = new AssignmentInfo(activeTasks, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>());
    AssignmentInfo decoded = AssignmentInfo.decode(info.encode());
    assertEquals(info, decoded);
}
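The round trip above works because TopicPartition is a simple value object (topic name plus partition number) with proper equals and hashCode, so it behaves predictably as a set element and map key. A minimal standalone sketch of that property (not part of the test above):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class TopicPartitionKeyDemo {
    public static void main(String[] args) {
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(new TopicPartition("t1", 0), 42L);
        // A second instance with the same topic and partition is equal to the first,
        // so it looks up the same map entry.
        System.out.println(offsets.get(new TopicPartition("t1", 0))); // prints 42
    }
}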
use of org.apache.kafka.common.TopicPartition in project kafka by apache.
the class ProcessorTopologyTestDriver method process.
/**
* Send an input message with the given key, value, and timestamp on the specified topic to the topology, and then commit the messages.
*
* @param topicName the name of the topic on which the message is to be sent
* @param key the raw message key
* @param value the raw message value
* @param timestamp the raw message timestamp
*/
private void process(String topicName, byte[] key, byte[] value, long timestamp) {
    TopicPartition tp = partitionsByTopic.get(topicName);
    if (tp != null) {
        // Add the record ...
        long offset = offsetsByTopicPartition.get(tp).incrementAndGet();
        task.addRecords(tp, records(new ConsumerRecord<>(tp.topic(), tp.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value)));
        producer.clear();
        // Process the record ...
        task.process();
        ((InternalProcessorContext) task.context()).setRecordContext(new ProcessorRecordContext(timestamp, offset, tp.partition(), topicName));
        task.commit();
        // Capture all the records sent to the producer ...
        for (ProducerRecord<byte[], byte[]> record : producer.history()) {
            Queue<ProducerRecord<byte[], byte[]>> outputRecords = outputRecordsByTopic.get(record.topic());
            if (outputRecords == null) {
                outputRecords = new LinkedList<>();
                outputRecordsByTopic.put(record.topic(), outputRecords);
            }
            outputRecords.add(record);
            // Forward back into the topology if the produced record is to an internal or a source topic ...
            if (internalTopics.contains(record.topic()) || topology.sourceTopics().contains(record.topic())) {
                process(record.topic(), record.key(), record.value(), record.timestamp());
            }
        }
    } else {
        final TopicPartition global = globalPartitionsByTopic.get(topicName);
        if (global == null) {
            throw new IllegalArgumentException("Unexpected topic: " + topicName);
        }
        final long offset = offsetsByTopicPartition.get(global).incrementAndGet();
        globalStateTask.update(new ConsumerRecord<>(global.topic(), global.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value));
        globalStateTask.flushState();
    }
}
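The driver above hands out offsets from one monotonically increasing counter per TopicPartition (offsetsByTopicPartition). A minimal sketch of that bookkeeping pattern in isolation, with hypothetical names and a -1 initial value so the first offset handed out is 0:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.common.TopicPartition;

public class OffsetBookkeepingDemo {
    // One counter per partition; incrementAndGet() hands out the next offset.
    private final Map<TopicPartition, AtomicLong> offsetsByTopicPartition = new HashMap<>();

    public long nextOffset(String topic, int partition) {
        TopicPartition tp = new TopicPartition(topic, partition);
        return offsetsByTopicPartition.computeIfAbsent(tp, k -> new AtomicLong(-1L)).incrementAndGet();
    }

    public static void main(String[] args) {
        OffsetBookkeepingDemo demo = new OffsetBookkeepingDemo();
        System.out.println(demo.nextOffset("t1", 0)); // 0
        System.out.println(demo.nextOffset("t1", 0)); // 1
        System.out.println(demo.nextOffset("t1", 1)); // 0 -- each partition gets its own counter
    }
}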
use of org.apache.kafka.common.TopicPartition in project storm by apache.
the class KafkaSpoutRetryExponentialBackoff method retriableTopicPartitions.
@Override
public Set<TopicPartition> retriableTopicPartitions() {
    final Set<TopicPartition> tps = new HashSet<>();
    final long currentTimeNanos = System.nanoTime();
    for (RetrySchedule retrySchedule : retrySchedules) {
        if (retrySchedule.retry(currentTimeNanos)) {
            final KafkaSpoutMessageId msgId = retrySchedule.msgId;
            tps.add(new TopicPartition(msgId.topic(), msgId.partition()));
        } else {
            // Schedules are ordered by retry time, so stop at the first entry that is not yet due
            break;
        }
    }
    LOG.debug("Topic partitions with entries ready to be retried [{}] ", tps);
    return tps;
}
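The early break above is only safe because retrySchedules is kept sorted by next retry time. A minimal standalone sketch of that idea, with hypothetical names, using a TreeSet ordered by due time:

import java.util.Set;
import java.util.TreeSet;

public class RetryScheduleDemo {
    // Hypothetical schedule entry, ordered by its due time in nanoseconds.
    static final class Schedule implements Comparable<Schedule> {
        final long dueNanos;
        final String id;
        Schedule(long dueNanos, String id) { this.dueNanos = dueNanos; this.id = id; }
        @Override public int compareTo(Schedule o) { return Long.compare(dueNanos, o.dueNanos); }
    }

    public static void main(String[] args) {
        Set<Schedule> schedules = new TreeSet<>();
        long now = System.nanoTime();
        schedules.add(new Schedule(now - 1_000L, "due"));           // already due
        schedules.add(new Schedule(now + 1_000_000_000L, "later")); // due in one second
        for (Schedule s : schedules) {
            if (s.dueNanos <= now) {
                System.out.println("retry " + s.id);
            } else {
                break; // sorted order: every later entry is due even later
            }
        }
    }
}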
use of org.apache.kafka.common.TopicPartition in project storm by apache.
the class SingleTopicKafkaSpoutTest method verifyAllMessagesCommitted.
/*
 * Asserts that commitSync has been called once,
 * that there are commits on only one topic partition,
 * and that the committed offset covers messageCount messages
 */
private void verifyAllMessagesCommitted(long messageCount) {
    verify(consumerSpy, times(1)).commitSync(commitCapture.capture());
    Map<TopicPartition, OffsetAndMetadata> commits = commitCapture.getValue();
    assertThat("Expected commits for only one topic partition", commits.entrySet().size(), is(1));
    OffsetAndMetadata offset = commits.entrySet().iterator().next().getValue();
    assertThat("Expected committed offset to cover all emitted messages", offset.offset(), is(messageCount - 1));
}
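commitCapture above is a Mockito ArgumentCaptor. A self-contained sketch of the same capture-and-verify pattern (JUnit 4 style; the mock and topic here are hypothetical, not the Storm test fixture):

import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

public class CommitCaptureDemo {
    @Mock private Consumer<String, String> consumerSpy;
    @Captor private ArgumentCaptor<Map<TopicPartition, OffsetAndMetadata>> commitCapture;

    @Before
    public void setUp() {
        MockitoAnnotations.initMocks(this);
    }

    @Test
    public void capturesCommittedOffsets() {
        // Simulate the code under test committing an offset on the mock.
        consumerSpy.commitSync(Collections.singletonMap(new TopicPartition("t", 0), new OffsetAndMetadata(9)));
        // Capture the argument of the recorded commitSync call and inspect it.
        verify(consumerSpy, times(1)).commitSync(commitCapture.capture());
        System.out.println(commitCapture.getValue());
    }
}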
use of org.apache.kafka.common.TopicPartition in project storm by apache.
the class KafkaTridentSpoutEmitter method emitPartitionBatch.
@Override
public KafkaTridentSpoutBatchMetadata<K, V> emitPartitionBatch(TransactionAttempt tx, TridentCollector collector,
        KafkaTridentSpoutTopicPartition currBatchPartition, KafkaTridentSpoutBatchMetadata<K, V> lastBatch) {
    LOG.debug("Processing batch: [transaction = {}], [currBatchPartition = {}], [lastBatchMetadata = {}], [collector = {}]",
            tx, currBatchPartition, lastBatch, collector);
    final TopicPartition currBatchTp = currBatchPartition.getTopicPartition();
    final Set<TopicPartition> assignments = kafkaConsumer.assignment();
    KafkaTridentSpoutBatchMetadata<K, V> currentBatch = lastBatch;
    Collection<TopicPartition> pausedTopicPartitions = Collections.emptySet();
    if (assignments == null || !assignments.contains(currBatchPartition.getTopicPartition())) {
        LOG.warn("SKIPPING processing batch: [transaction = {}], [currBatchPartition = {}], [lastBatchMetadata = {}], "
                + "[collector = {}] because it is not part of the assignments {} of consumer instance [{}] of consumer group [{}]",
                tx, currBatchPartition, lastBatch, collector, assignments, kafkaConsumer,
                kafkaManager.getKafkaSpoutConfig().getConsumerGroupId());
    } else {
        try {
            // pause other topic-partitions to only poll from current topic-partition
            pausedTopicPartitions = pauseTopicPartitions(currBatchTp);
            seek(currBatchTp, lastBatch);
            // poll
            if (refreshSubscriptionTimer.isExpiredResetOnTrue()) {
                kafkaManager.getKafkaSpoutConfig().getSubscription().refreshAssignment();
            }
            final ConsumerRecords<K, V> records = kafkaConsumer.poll(pollTimeoutMs);
            LOG.debug("Polled [{}] records from Kafka.", records.count());
            if (!records.isEmpty()) {
                emitTuples(collector, records);
                // build new metadata
                currentBatch = new KafkaTridentSpoutBatchMetadata<>(currBatchTp, records, lastBatch);
            }
        } finally {
            kafkaConsumer.resume(pausedTopicPartitions);
            LOG.trace("Resumed topic-partitions {}", pausedTopicPartitions);
        }
        LOG.debug("Emitted batch: [transaction = {}], [currBatchPartition = {}], [lastBatchMetadata = {}], "
                + "[currBatchMetadata = {}], [collector = {}]", tx, currBatchPartition, lastBatch, currentBatch, collector);
    }
    return currentBatch;
}
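The pause/resume dance ensures the poll() above returns records only for the current batch's partition. A minimal sketch of that pattern against the plain KafkaConsumer API (hypothetical helper name; error handling omitted):

import java.util.ArrayList;
import java.util.Collection;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

public final class PauseResumeDemo {
    // Pause every assigned partition except the one we want to poll, and
    // return the paused set so the caller can resume it later.
    static Collection<TopicPartition> pauseAllExcept(Consumer<?, ?> consumer, TopicPartition keep) {
        Collection<TopicPartition> paused = new ArrayList<>();
        for (TopicPartition tp : consumer.assignment()) {
            if (!tp.equals(keep)) {
                paused.add(tp);
            }
        }
        consumer.pause(paused);
        return paused;
    }
}

The caller then polls and resumes the paused set in a finally block, exactly as emitPartitionBatch does above, so partitions are never left paused after a failure.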