use of org.apache.kafka.common.errors.LeaderNotAvailableException in project kafka by apache.
the class InternalTopicManager method doValidateTopic.
private <V> void doValidateTopic(final ValidationResult validationResult,
                                 final Map<String, KafkaFuture<V>> futuresForTopic,
                                 final Map<String, InternalTopicConfig> topicsConfigs,
                                 final Set<String> topicsStillToValidate,
                                 final BiConsumer<InternalTopicConfig, V> validator) {
    for (final String topicName : new HashSet<>(topicsStillToValidate)) {
        if (!futuresForTopic.containsKey(topicName)) {
            throw new IllegalStateException("Description results do not contain topics to validate. " + BUG_ERROR_MESSAGE);
        }
        final KafkaFuture<V> future = futuresForTopic.get(topicName);
        if (future.isDone()) {
            try {
                final V brokerSideTopicConfig = future.get();
                final InternalTopicConfig streamsSideTopicConfig = topicsConfigs.get(topicName);
                validator.accept(streamsSideTopicConfig, brokerSideTopicConfig);
                topicsStillToValidate.remove(topicName);
            } catch (final ExecutionException executionException) {
                final Throwable cause = executionException.getCause();
                if (cause instanceof UnknownTopicOrPartitionException) {
                    log.info("Internal topic {} is missing", topicName);
                    validationResult.addMissingTopic(topicName);
                    topicsStillToValidate.remove(topicName);
                } else if (cause instanceof LeaderNotAvailableException) {
                    log.info("The leader of internal topic {} is not available.", topicName);
                } else if (cause instanceof TimeoutException) {
                    log.info("Retrieving data for internal topic {} timed out.", topicName);
                } else {
                    log.error("Unexpected error during internal topic validation: ", cause);
                    throw new StreamsException(
                        String.format("Could not validate internal topic %s for the following reason: ", topicName),
                        cause);
                }
            } catch (final InterruptedException interruptedException) {
                throw new InterruptException(interruptedException);
            } finally {
                futuresForTopic.remove(topicName);
            }
        }
    }
}
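The pattern above, unwrapping the ExecutionException from a KafkaFuture and classifying its cause as missing topic, transient broker state, or fatal error, recurs in every example on this page. A minimal standalone sketch of that classification (the TopicCheck enum and FutureClassifier helper are illustrative names, not part of InternalTopicManager):

import java.util.concurrent.ExecutionException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.errors.LeaderNotAvailableException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

// Hypothetical helper; mirrors the cause classification in doValidateTopic.
enum TopicCheck { OK, MISSING, RETRY, FATAL }

final class FutureClassifier {
    static <V> TopicCheck classify(final KafkaFuture<V> future) throws InterruptedException {
        try {
            future.get();
            return TopicCheck.OK;
        } catch (final ExecutionException e) {
            final Throwable cause = e.getCause();
            if (cause instanceof UnknownTopicOrPartitionException) {
                return TopicCheck.MISSING;  // topic does not exist yet
            }
            if (cause instanceof LeaderNotAvailableException || cause instanceof TimeoutException) {
                return TopicCheck.RETRY;    // transient; keep the topic in the to-validate set
            }
            return TopicCheck.FATAL;        // anything else aborts validation
        }
    }
}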
use of org.apache.kafka.common.errors.LeaderNotAvailableException in project kafka by apache.
the class InternalTopicManager method getNumPartitions.
/**
 * Try to get the number of partitions for the given topics; return the number of partitions
 * for the topics that already exist.
 *
 * Topics whose description could not be retrieved are simply not returned.
 */
// visible for testing
protected Map<String, Integer> getNumPartitions(final Set<String> topics,
                                                final Set<String> tempUnknownTopics) {
    log.debug("Trying to check if topics {} have been created with expected number of partitions.", topics);
    final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topics);
    final Map<String, KafkaFuture<TopicDescription>> futures = describeTopicsResult.topicNameValues();
    final Map<String, Integer> existedTopicPartition = new HashMap<>();
    for (final Map.Entry<String, KafkaFuture<TopicDescription>> topicFuture : futures.entrySet()) {
        final String topicName = topicFuture.getKey();
        try {
            final TopicDescription topicDescription = topicFuture.getValue().get();
            existedTopicPartition.put(topicName, topicDescription.partitions().size());
        } catch (final InterruptedException fatalException) {
            // this should not happen; if it ever does, it indicates a bug
            Thread.currentThread().interrupt();
            log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
            throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
        } catch (final ExecutionException couldNotDescribeTopicException) {
            final Throwable cause = couldNotDescribeTopicException.getCause();
            if (cause instanceof UnknownTopicOrPartitionException) {
                // The topic does not exist yet; proceed to try to create it
                log.debug("Topic {} is unknown or not found, hence it does not exist yet.\n" +
                    "Error message was: {}", topicName, cause.toString());
            } else if (cause instanceof LeaderNotAvailableException) {
                tempUnknownTopics.add(topicName);
                log.debug("The leader of topic {} is not available.\n" +
                    "Error message was: {}", topicName, cause.toString());
            } else {
                log.error("Unexpected error during topic description for {}.\n" +
                    "Error message was: {}", topicName, cause.toString());
                throw new StreamsException(String.format("Could not create topic %s.", topicName), cause);
            }
        } catch (final TimeoutException retriableException) {
            tempUnknownTopics.add(topicName);
            log.debug("Describing topic {} (to get number of partitions) timed out.\n" +
                "Error message was: {}", topicName, retriableException.toString());
        }
    }
    return existedTopicPartition;
}
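A hedged usage sketch of getNumPartitions follows. Since the method is protected, such a caller would live in the same class or a subclass; the manager variable and the topic names are placeholders. It shows how the returned map and the tempUnknownTopics out-parameter combine: topics missing from both can safely be created, while topics in tempUnknownTopics should be re-described later rather than created.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch only: "manager" is an InternalTopicManager (or subclass) instance.
final Set<String> topics = new HashSet<>(Arrays.asList("app-repartition", "app-changelog"));
final Set<String> tempUnknownTopics = new HashSet<>();
final Map<String, Integer> existing = manager.getNumPartitions(topics, tempUnknownTopics);

final Set<String> toCreate = new HashSet<>(topics);
toCreate.removeAll(existing.keySet());     // described successfully: they already exist
toCreate.removeAll(tempUnknownTopics);     // transient failure: re-describe, do not create yet
// "toCreate" now holds only the topics that are confirmed missing.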
use of org.apache.kafka.common.errors.LeaderNotAvailableException in project cdap by caskdata.
the class KafkaOffsetResolver method getStartOffset.
/**
* Check whether the message fetched with the offset {@code checkpoint.getNextOffset() - 1} contains the
* same timestamp as in the given checkpoint. If they match, directly return {@code checkpoint.getNextOffset()}.
* If they don't, search for the smallest offset of the message with the same log event time
* as {@code checkpoint.getNextEventTime()}.
*
* @param checkpoint A {@link Checkpoint} containing the next offset of a message and its log event timestamp.
* {@link Checkpoint#getNextOffset()}, {@link Checkpoint#getNextEventTime()}
* and {@link Checkpoint#getMaxEventTime()} all must return a non-negative long
* @param partition the partition in the topic for searching matching offset
* @return the next offset of the message with smallest offset and log event time equal to
* {@code checkpoint.getNextEventTime()}.
* {@code -1} if no such offset can be found or {@code checkpoint.getNextOffset()} is negative.
*
* @throws LeaderNotAvailableException if there is no Kafka broker to talk to.
* @throws OffsetOutOfRangeException if the given offset is out of range.
* @throws NotLeaderForPartitionException if the broker that the consumer is talking to is not the leader
* for the given topic and partition.
* @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server.
* @throws UnknownServerException if the Kafka server responded with an error.
*/
long getStartOffset(final Checkpoint checkpoint, final int partition) {
    // This should never happen
    Preconditions.checkArgument(checkpoint.getNextOffset() > 0, "Invalid checkpoint offset");
    // Get BrokerInfo for constructing SimpleConsumer
    String topic = config.getTopic();
    BrokerInfo brokerInfo = brokerService.getLeader(topic, partition);
    if (brokerInfo == null) {
        throw new LeaderNotAvailableException(
            String.format("BrokerInfo from BrokerService is null for topic %s partition %d. Will retry in next run.",
                          topic, partition));
    }
    SimpleConsumer consumer = new SimpleConsumer(brokerInfo.getHost(), brokerInfo.getPort(),
                                                 SO_TIMEOUT_MILLIS, BUFFER_SIZE,
                                                 "offset-finder-" + topic + "-" + partition);
    // Fetch the message at offset checkpoint.getNextOffset() - 1 and check whether its
    // event time matches the timestamp stored in the checkpoint
    long offset = checkpoint.getNextOffset() - 1;
    try {
        long timestamp = getEventTimeByOffset(consumer, partition, offset);
        if (timestamp == checkpoint.getNextEventTime()) {
            return checkpoint.getNextOffset();
        }
        // This can happen in a replicated cluster
        LOG.debug("Event timestamp in {}:{} at offset {} is {}. It doesn't match the checkpoint timestamp {}",
                  topic, partition, offset, timestamp, checkpoint.getNextEventTime());
    } catch (NotFoundException | OffsetOutOfRangeException e) {
        // This means we can't find the timestamp. This can happen in a replicated cluster
        LOG.debug("Cannot get valid log event in {}:{} at offset {}", topic, partition, offset);
    }
    // Find the smallest offset whose event matches the checkpoint timestamp
    long nextOffset = findStartOffset(consumer, partition, checkpoint.getNextEventTime());
    LOG.debug("Found new nextOffset {} for topic {} partition {} with existing checkpoint {}.",
              nextOffset, topic, partition, checkpoint);
    return nextOffset;
}
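Because getStartOffset throws LeaderNotAvailableException when BrokerService reports no leader, callers treat the exception as retriable, as its message already suggests. A sketch of such a caller (offsetResolver and the fallback policy are illustrative; the real pipeline simply retries on its next scheduled run):

import org.apache.kafka.common.errors.LeaderNotAvailableException;

// Illustrative caller: fall back to the checkpointed offset on a transient
// leader outage and let the next run re-resolve the start offset.
long startOffset;
try {
    startOffset = offsetResolver.getStartOffset(checkpoint, partition);
} catch (LeaderNotAvailableException e) {
    startOffset = checkpoint.getNextOffset();
}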
use of org.apache.kafka.common.errors.LeaderNotAvailableException in project apache-kafka-on-k8s by banzaicloud.
the class KafkaAdminClientTest method testDeleteRecords.
@Test
public void testDeleteRecords() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    List<PartitionInfo> partitionInfos = new ArrayList<>();
    partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos,
        Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));
    TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
    TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
    TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
    TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
    TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().nodes().get(0));
        Map<TopicPartition, DeleteRecordsResponse.PartitionResponse> m = new HashMap<>();
        m.put(myTopicPartition0, new DeleteRecordsResponse.PartitionResponse(3, Errors.NONE));
        m.put(myTopicPartition1, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.OFFSET_OUT_OF_RANGE));
        m.put(myTopicPartition3, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.NOT_LEADER_FOR_PARTITION));
        m.put(myTopicPartition4, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        List<MetadataResponse.TopicMetadata> t = new ArrayList<>();
        List<MetadataResponse.PartitionMetadata> p = new ArrayList<>();
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 0, nodes.get(0),
            Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, nodes.get(0),
            Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, 2, null,
            Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 3, nodes.get(0),
            Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 4, nodes.get(0),
            Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
        env.kafkaClient().prepareResponse(new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t));
        env.kafkaClient().prepareResponse(new DeleteRecordsResponse(0, m));
        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L));
        recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition4, RecordsToDelete.beforeOffset(10L));
        DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
        // success on records deletion for partition 0
        Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks();
        KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0);
        long lowWatermark = myTopicPartition0Result.get().lowWatermark();
        assertEquals(3L, lowWatermark);
        // "offset out of range" failure on records deletion for partition 1
        KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1);
        try {
            myTopicPartition1Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof OffsetOutOfRangeException);
        }
        // "leader not available" failure on metadata request for partition 2
        KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2);
        try {
            myTopicPartition2Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof LeaderNotAvailableException);
        }
        // "not leader for partition" failure on records deletion for partition 3
        KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3);
        try {
            myTopicPartition3Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e2) {
            assertTrue(e2.getCause() instanceof NotLeaderForPartitionException);
        }
        // "unknown topic or partition" failure on records deletion for partition 4
        KafkaFuture<DeletedRecords> myTopicPartition4Result = values.get(myTopicPartition4);
        try {
            myTopicPartition4Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e3) {
            assertTrue(e3.getCause() instanceof UnknownTopicOrPartitionException);
        }
    }
}
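Outside of tests, the per-partition futures returned by deleteRecords need the same cause inspection. A sketch of one way to fold the results, assuming only that LeaderNotAvailableException and NotLeaderForPartitionException extend RetriableException (they do, via InvalidMetadataException); the helper class and its name are hypothetical:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RetriableException;

final class DeleteRecordsHelper {
    // Collect low watermarks per partition; skip (for a later retry) any
    // partition whose failure cause is retriable, fail fast on anything else.
    static Map<TopicPartition, Long> deleteAndCollect(final AdminClient admin,
                                                      final Map<TopicPartition, RecordsToDelete> request)
            throws InterruptedException {
        final Map<TopicPartition, Long> lowWatermarks = new HashMap<>();
        final Map<TopicPartition, KafkaFuture<DeletedRecords>> futures =
            admin.deleteRecords(request).lowWatermarks();
        for (final Map.Entry<TopicPartition, KafkaFuture<DeletedRecords>> entry : futures.entrySet()) {
            try {
                lowWatermarks.put(entry.getKey(), entry.getValue().get().lowWatermark());
            } catch (final ExecutionException e) {
                if (e.getCause() instanceof RetriableException) {
                    continue;  // e.g. LEADER_NOT_AVAILABLE or NOT_LEADER_FOR_PARTITION
                }
                throw new RuntimeException("deleteRecords failed for " + entry.getKey(), e.getCause());
            }
        }
        return lowWatermarks;
    }
}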
use of org.apache.kafka.common.errors.LeaderNotAvailableException in project apache-kafka-on-k8s by banzaicloud.
the class KafkaBasedLogTest method testProducerError.
@Test
public void testProducerError() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    final AtomicReference<Throwable> setException = new AtomicReference<>();
    store.send(TP0_KEY, TP0_VALUE, new org.apache.kafka.clients.producer.Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            // Should only be invoked once
            assertNull(setException.get());
            setException.set(exception);
        }
    });
    KafkaException exc = new LeaderNotAvailableException("Error");
    tp0Future.resolve(exc);
    callback0.getValue().onCompletion(null, exc);
    assertNotNull(setException.get());
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
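For comparison with the test's hand-invoked callback, a minimal sketch of a production send callback; it assumes only the standard producer Callback contract, under which the callback fires once, after any internal retries are exhausted:

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.RetriableException;

// LeaderNotAvailableException extends RetriableException, so by the time the
// callback sees it the producer has already exhausted its configured retries.
final Callback loggingCallback = new Callback() {
    @Override
    public void onCompletion(final RecordMetadata metadata, final Exception exception) {
        if (exception == null) {
            System.out.printf("Stored at %s-%d@%d%n",
                metadata.topic(), metadata.partition(), metadata.offset());
        } else if (exception instanceof RetriableException) {
            // transient error (e.g. LeaderNotAvailableException): safe to resend
            System.err.println("Transient send failure, consider resending: " + exception);
        } else {
            System.err.println("Fatal send failure: " + exception);
        }
    }
};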