Use of org.apache.kafka.common.errors.LeaderNotAvailableException in the apache/kafka project.
From the class KafkaAdminClientTest, method testDeleteRecords:
@Test
public void testDeleteRecords() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    List<PartitionInfo> partitionInfos = new ArrayList<>();
    partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));
    TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
    TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
    TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
    TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
    TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        DeleteRecordsResponseData m = new DeleteRecordsResponseData();
        m.topics().add(new DeleteRecordsResponseData.DeleteRecordsTopicResult()
            .setName(myTopicPartition0.topic())
            .setPartitions(new DeleteRecordsResponseData.DeleteRecordsPartitionResultCollection(asList(
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition0.partition())
                    .setLowWatermark(3)
                    .setErrorCode(Errors.NONE.code()),
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition1.partition())
                    .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
                    .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()),
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition3.partition())
                    .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
                    .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code()),
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition4.partition())
                    .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
                    .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())).iterator())));
        List<MetadataResponse.TopicMetadata> t = new ArrayList<>();
        List<MetadataResponse.PartitionMetadata> p = new ArrayList<>();
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition0, Optional.of(nodes.get(0).id()), Optional.of(5),
            singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition1, Optional.of(nodes.get(0).id()), Optional.of(5),
            singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, myTopicPartition2, Optional.empty(), Optional.empty(),
            singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition3, Optional.of(nodes.get(0).id()), Optional.of(5),
            singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition4, Optional.of(nodes.get(0).id()), Optional.of(5),
            singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
        env.kafkaClient().prepareResponse(RequestTestUtils.metadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t));
        env.kafkaClient().prepareResponse(new DeleteRecordsResponse(m));
        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L));
        recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition4, RecordsToDelete.beforeOffset(10L));
        DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
        // success on records deletion for partition 0
        Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks();
        KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0);
        long lowWatermark = myTopicPartition0Result.get().lowWatermark();
        assertEquals(3, lowWatermark);
        // "offset out of range" failure on records deletion for partition 1
        KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1);
        try {
            myTopicPartition1Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof OffsetOutOfRangeException);
        }
        // "leader not available" failure on metadata request for partition 2
        KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2);
        try {
            myTopicPartition2Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof LeaderNotAvailableException);
        }
        // "not leader for partition" failure on records deletion for partition 3
        KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3);
        try {
            myTopicPartition3Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof NotLeaderOrFollowerException);
        }
        // "unknown topic or partition" failure on records deletion for partition 4
        KafkaFuture<DeletedRecords> myTopicPartition4Result = values.get(myTopicPartition4);
        try {
            myTopicPartition4Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof UnknownTopicOrPartitionException);
        }
    }
}
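The test above covers both retriable failures (leader not available, not leader or follower) and a non-retriable one (offset out of range). A minimal sketch of how a caller might handle that split when using deleteRecords, retrying only the partitions whose futures fail with a RetriableException; the helper name, the retry bound, and the absence of backoff are illustrative choices, not part of the Kafka API:

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RetriableException;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;

public class DeleteRecordsWithRetry {

    // Deletes records before the requested offsets and retries only the partitions whose
    // futures fail with a retriable cause such as LeaderNotAvailableException.
    // Partitions that still fail after maxAttempts are simply left out of the returned map.
    static Map<TopicPartition, Long> deleteWithRetry(Admin admin,
                                                     Map<TopicPartition, RecordsToDelete> toDelete,
                                                     int maxAttempts) throws InterruptedException, ExecutionException {
        Map<TopicPartition, Long> lowWatermarks = new HashMap<>();
        Map<TopicPartition, RecordsToDelete> remaining = new HashMap<>(toDelete);
        for (int attempt = 0; attempt < maxAttempts && !remaining.isEmpty(); attempt++) {
            DeleteRecordsResult result = admin.deleteRecords(remaining);
            Map<TopicPartition, RecordsToDelete> retry = new HashMap<>();
            for (Map.Entry<TopicPartition, KafkaFuture<DeletedRecords>> entry : result.lowWatermarks().entrySet()) {
                try {
                    lowWatermarks.put(entry.getKey(), entry.getValue().get().lowWatermark());
                } catch (ExecutionException e) {
                    if (e.getCause() instanceof RetriableException) {
                        // Transient metadata problem (e.g. leader not available): try this partition again.
                        retry.put(entry.getKey(), remaining.get(entry.getKey()));
                    } else {
                        // Non-retriable causes such as OffsetOutOfRangeException are surfaced to the caller.
                        throw e;
                    }
                }
            }
            remaining = retry;
        }
        return lowWatermarks;
    }
}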
Use of org.apache.kafka.common.errors.LeaderNotAvailableException in the apache/kafka project.
From the class InternalTopicManagerTest, method shouldOnlyRetryDescribeTopicsWhenDescribeTopicsThrowsLeaderNotAvailableExceptionDuringValidation:
@Test
public void shouldOnlyRetryDescribeTopicsWhenDescribeTopicsThrowsLeaderNotAvailableExceptionDuringValidation() {
    final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, new StreamsConfig(config));
    final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>();
    topicDescriptionFailFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!"));
    final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessfulFuture = new KafkaFutureImpl<>();
    topicDescriptionSuccessfulFuture.complete(new TopicDescription(
        topic1, false, Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList()))));
    EasyMock.expect(admin.describeTopics(Collections.singleton(topic1)))
        .andReturn(new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionFailFuture))))
        .andReturn(new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionSuccessfulFuture))));
    final KafkaFutureImpl<Config> topicConfigSuccessfulFuture = new KafkaFutureImpl<>();
    topicConfigSuccessfulFuture.complete(new Config(repartitionTopicConfig().entrySet().stream()
        .map(entry -> new ConfigEntry(entry.getKey(), entry.getValue()))
        .collect(Collectors.toSet())));
    final ConfigResource topicResource = new ConfigResource(Type.TOPIC, topic1);
    EasyMock.expect(admin.describeConfigs(Collections.singleton(topicResource)))
        .andReturn(new MockDescribeConfigsResult(mkMap(mkEntry(topicResource, topicConfigSuccessfulFuture))));
    EasyMock.replay(admin);
    final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);
    final ValidationResult validationResult = topicManager.validate(Collections.singletonMap(topic1, internalTopicConfig));
    assertThat(validationResult.missingTopics(), empty());
    assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap());
    EasyMock.verify(admin);
}
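The retry asserted above is safe because LeaderNotAvailableException sits under RetriableException in the client exception hierarchy, so a describeTopics future that fails with it signals a transient metadata condition rather than a missing or broken topic. A small standalone illustration of that classification (not code from InternalTopicManager):

import org.apache.kafka.common.errors.LeaderNotAvailableException;
import org.apache.kafka.common.errors.RetriableException;

public class RetriabilityCheck {
    public static void main(String[] args) {
        Throwable cause = new LeaderNotAvailableException("Leader Not Available!");
        // LeaderNotAvailableException extends InvalidMetadataException, which extends RetriableException,
        // so a describe failure with this cause can simply be attempted again.
        System.out.println(cause instanceof RetriableException); // prints true
    }
}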
Use of org.apache.kafka.common.errors.LeaderNotAvailableException in the apache/kafka project.
From the class InternalTopicManagerTest, method shouldOnlyRetryNotSuccessfulFuturesDuringValidation:
@Test
public void shouldOnlyRetryNotSuccessfulFuturesDuringValidation() {
    final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, new StreamsConfig(config));
    final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>();
    topicDescriptionFailFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!"));
    final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessfulFuture1 = new KafkaFutureImpl<>();
    topicDescriptionSuccessfulFuture1.complete(new TopicDescription(
        topic1, false, Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList()))));
    final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessfulFuture2 = new KafkaFutureImpl<>();
    topicDescriptionSuccessfulFuture2.complete(new TopicDescription(
        topic2, false, Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList()))));
    EasyMock.expect(admin.describeTopics(mkSet(topic1, topic2)))
        .andAnswer(() -> new MockDescribeTopicsResult(mkMap(
            mkEntry(topic1, topicDescriptionSuccessfulFuture1),
            mkEntry(topic2, topicDescriptionFailFuture))));
    EasyMock.expect(admin.describeTopics(mkSet(topic2)))
        .andAnswer(() -> new MockDescribeTopicsResult(mkMap(mkEntry(topic2, topicDescriptionSuccessfulFuture2))));
    final KafkaFutureImpl<Config> topicConfigSuccessfulFuture = new KafkaFutureImpl<>();
    topicConfigSuccessfulFuture.complete(new Config(repartitionTopicConfig().entrySet().stream()
        .map(entry -> new ConfigEntry(entry.getKey(), entry.getValue()))
        .collect(Collectors.toSet())));
    final ConfigResource topicResource1 = new ConfigResource(Type.TOPIC, topic1);
    final ConfigResource topicResource2 = new ConfigResource(Type.TOPIC, topic2);
    EasyMock.expect(admin.describeConfigs(mkSet(topicResource1, topicResource2)))
        .andAnswer(() -> new MockDescribeConfigsResult(mkMap(
            mkEntry(topicResource1, topicConfigSuccessfulFuture),
            mkEntry(topicResource2, topicConfigSuccessfulFuture))));
    EasyMock.replay(admin);
    final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 1);
    final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(topic2, 1);
    final ValidationResult validationResult = topicManager.validate(mkMap(
        mkEntry(topic1, internalTopicConfig1),
        mkEntry(topic2, internalTopicConfig2)));
    assertThat(validationResult.missingTopics(), empty());
    assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap());
    EasyMock.verify(admin);
}
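The behavior pinned down here, re-describing only the topic whose future failed, can be sketched roughly as below. This is a simplified illustration against the Admin interface, not the actual InternalTopicManager code; it assumes DescribeTopicsResult.values(), which keys the futures by topic name, and it omits the bounded retries and backoff a real implementation needs:

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.errors.RetriableException;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;

public class PartialDescribeRetry {

    // Describes the given topics and re-issues describeTopics only for the subset whose
    // futures completed exceptionally with a retriable cause such as LeaderNotAvailableException.
    static Map<String, TopicDescription> describeWithPartialRetry(Admin admin, Set<String> topics)
            throws InterruptedException, ExecutionException {
        Map<String, TopicDescription> described = new HashMap<>();
        Set<String> remaining = new HashSet<>(topics);
        while (!remaining.isEmpty()) {
            Set<String> retry = new HashSet<>();
            for (Map.Entry<String, KafkaFuture<TopicDescription>> entry :
                     admin.describeTopics(remaining).values().entrySet()) {
                try {
                    described.put(entry.getKey(), entry.getValue().get());
                } catch (ExecutionException e) {
                    if (e.getCause() instanceof RetriableException) {
                        retry.add(entry.getKey()); // only the failed topics are described again
                    } else {
                        throw e;
                    }
                }
            }
            remaining = retry;
        }
        return described;
    }
}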
Use of org.apache.kafka.common.errors.LeaderNotAvailableException in the apache/kafka project.
From the class InternalTopicManagerTest, method shouldOnlyRetryDescribeConfigsWhenDescribeConfigsThrowsLeaderNotAvailableExceptionDuringValidation:
@Test
public void shouldOnlyRetryDescribeConfigsWhenDescribeConfigsThrowsLeaderNotAvailableExceptionDuringValidation() {
    final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, new StreamsConfig(config));
    final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessfulFuture = new KafkaFutureImpl<>();
    topicDescriptionSuccessfulFuture.complete(new TopicDescription(
        topic1, false, Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList()))));
    EasyMock.expect(admin.describeTopics(Collections.singleton(topic1)))
        .andReturn(new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionSuccessfulFuture))));
    final KafkaFutureImpl<Config> topicConfigsFailFuture = new KafkaFutureImpl<>();
    topicConfigsFailFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!"));
    final KafkaFutureImpl<Config> topicConfigSuccessfulFuture = new KafkaFutureImpl<>();
    topicConfigSuccessfulFuture.complete(new Config(repartitionTopicConfig().entrySet().stream()
        .map(entry -> new ConfigEntry(entry.getKey(), entry.getValue()))
        .collect(Collectors.toSet())));
    final ConfigResource topicResource = new ConfigResource(Type.TOPIC, topic1);
    EasyMock.expect(admin.describeConfigs(Collections.singleton(topicResource)))
        .andReturn(new MockDescribeConfigsResult(mkMap(mkEntry(topicResource, topicConfigsFailFuture))))
        .andReturn(new MockDescribeConfigsResult(mkMap(mkEntry(topicResource, topicConfigSuccessfulFuture))));
    EasyMock.replay(admin);
    final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);
    final ValidationResult validationResult = topicManager.validate(Collections.singletonMap(topic1, internalTopicConfig));
    assertThat(validationResult.missingTopics(), empty());
    assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap());
    EasyMock.verify(admin);
}
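Once the retried describeConfigs succeeds, the validation reduces to comparing the described Config against the expected repartition topic settings, which is what feeds misconfigurationsForTopics(). A minimal sketch of such a comparison; the helper and its inputs are illustrative, not the actual validation code:

import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ConfigComparison {

    // Returns the names of expected settings that are missing from, or differ in, the described Config.
    static List<String> misconfiguredKeys(Map<String, String> expected, Config described) {
        List<String> mismatches = new ArrayList<>();
        for (Map.Entry<String, String> entry : expected.entrySet()) {
            ConfigEntry actual = described.get(entry.getKey());
            if (actual == null || !entry.getValue().equals(actual.value())) {
                mismatches.add(entry.getKey());
            }
        }
        return mismatches;
    }
}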
Use of org.apache.kafka.common.errors.LeaderNotAvailableException in the apache/kafka project.
From the class InternalTopicManagerTest, method shouldCreateTopicWhenTopicLeaderNotAvailableAndThenTopicNotFound:
@Test
public void shouldCreateTopicWhenTopicLeaderNotAvailableAndThenTopicNotFound() {
    final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, new StreamsConfig(config));
    final KafkaFutureImpl<TopicDescription> topicDescriptionLeaderNotAvailableFuture = new KafkaFutureImpl<>();
    topicDescriptionLeaderNotAvailableFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!"));
    final KafkaFutureImpl<TopicDescription> topicDescriptionUnknownTopicFuture = new KafkaFutureImpl<>();
    topicDescriptionUnknownTopicFuture.completeExceptionally(new UnknownTopicOrPartitionException("Unknown Topic!"));
    final KafkaFutureImpl<CreateTopicsResult.TopicMetadataAndConfig> topicCreationFuture = new KafkaFutureImpl<>();
    topicCreationFuture.complete(EasyMock.createNiceMock(CreateTopicsResult.TopicMetadataAndConfig.class));
    EasyMock.expect(admin.describeTopics(Collections.singleton(topic1)))
        .andReturn(new MockDescribeTopicsResult(Collections.singletonMap(topic1, topicDescriptionLeaderNotAvailableFuture)))
        .once();
    // no createTopics call is expected after this first describe (leader not available);
    // creation should only happen once the topic is reported as unknown
    EasyMock.expect(admin.describeTopics(Collections.singleton(topic1)))
        .andReturn(new MockDescribeTopicsResult(Collections.singletonMap(topic1, topicDescriptionUnknownTopicFuture)))
        .once();
    EasyMock.expect(admin.createTopics(Collections.singleton(
            new NewTopic(topic1, Optional.of(1), Optional.of((short) 1)).configs(mkMap(
                mkEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE),
                mkEntry(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime"),
                mkEntry(TopicConfig.SEGMENT_BYTES_CONFIG, "52428800"),
                mkEntry(TopicConfig.RETENTION_MS_CONFIG, "-1"))))))
        .andReturn(new MockCreateTopicsResult(Collections.singletonMap(topic1, topicCreationFuture)))
        .once();
    EasyMock.replay(admin);
    final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
    internalTopicConfig.setNumberOfPartitions(1);
    topicManager.makeReady(Collections.singletonMap(topic1, internalTopicConfig));
    EasyMock.verify(admin);
}
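The order of expectations above, a describe that fails with leader-not-available, a retried describe with no topic creation in between, and a createTopics call only once the topic is reported as unknown, corresponds roughly to the flow sketched below. This is an illustration of the expected behavior against the Admin interface, not the actual makeReady() implementation; it assumes DescribeTopicsResult.values() and leaves out the bounded retries the real code applies:

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.LeaderNotAvailableException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

import java.util.Collections;
import java.util.Optional;
import java.util.concurrent.ExecutionException;

public class EnsureTopicExists {

    // Describes the topic, retries on transient leader unavailability, and creates the topic
    // only once the broker reports it as unknown.
    static void ensureTopicExists(Admin admin, String topic, int partitions, short replication)
            throws InterruptedException, ExecutionException {
        while (true) {
            try {
                admin.describeTopics(Collections.singleton(topic)).values().get(topic).get();
                return; // topic already exists
            } catch (ExecutionException e) {
                if (e.getCause() instanceof LeaderNotAvailableException) {
                    // Transient: describe again, do not create yet.
                } else if (e.getCause() instanceof UnknownTopicOrPartitionException) {
                    NewTopic newTopic = new NewTopic(topic, Optional.of(partitions), Optional.of(replication));
                    admin.createTopics(Collections.singleton(newTopic)).all().get();
                    return;
                } else {
                    throw e;
                }
            }
        }
    }
}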