Use of io.confluent.kafkarest.entities.v3.GetTopicResponse in project kafka-rest by confluentinc.
Class TopicsResourceIntegrationTest, method createAndDelete_nonExisting_returnsNotFoundCreatedAndNotFound.
@Test
public void createAndDelete_nonExisting_returnsNotFoundCreatedAndNotFound() {
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  String topicName = "topic-4";

  Response nonExistingGetTopicResponse =
      request("/v3/clusters/" + clusterId + "/topics/" + topicName)
          .accept(MediaType.APPLICATION_JSON)
          .get();
  assertEquals(Status.NOT_FOUND.getStatusCode(), nonExistingGetTopicResponse.getStatus());

  CreateTopicResponse expectedCreateTopicResponse =
      CreateTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + topicName)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setInternal(false)
              .setReplicationFactor(0)
              .setPartitionsCount(0)
              .setPartitions(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName
                          + "/partitions"))
              .setConfigs(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName
                          + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());

  Response createTopicResponse =
      request("/v3/clusters/" + clusterId + "/topics")
          .accept(MediaType.APPLICATION_JSON)
          .post(
              Entity.entity(
                  "{\"topic_name\":\"" + topicName + "\",\"partitions_count\":1,"
                      + "\"configs\":[{\"name\":\"cleanup.policy\",\"value\":\"compact\"}]}",
                  MediaType.APPLICATION_JSON));
  assertEquals(Status.CREATED.getStatusCode(), createTopicResponse.getStatus());

  CreateTopicResponse actualCreateTopicResponse =
      createTopicResponse.readEntity(CreateTopicResponse.class);
  assertEquals(expectedCreateTopicResponse, actualCreateTopicResponse);

  testWithRetry(
      () ->
          assertTrue(
              getTopicNames().contains(topicName),
              String.format("Topic names should contain %s after its creation", topicName)));

  GetTopicResponse expectedExistingGetTopicResponse =
      GetTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + topicName)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setInternal(false)
              .setReplicationFactor(2)
              .setPartitionsCount(1)
              .setPartitions(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName
                          + "/partitions"))
              .setConfigs(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName
                          + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());

  testWithRetry(
      () -> {
        Response existingTopicResponse =
            request("/v3/clusters/" + clusterId + "/topics/" + topicName)
                .accept(MediaType.APPLICATION_JSON)
                .get();
        assertEquals(Status.OK.getStatusCode(), existingTopicResponse.getStatus());

        GetTopicResponse actualExistingGetTopicResponse =
            existingTopicResponse.readEntity(GetTopicResponse.class);
        assertEquals(expectedExistingGetTopicResponse, actualExistingGetTopicResponse);
      });

  GetTopicConfigResponse expectedExistingGetTopicConfigResponse =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(
                          baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName
                              + "/configs/cleanup.policy")
                      .setResourceName(
                          "crn:///kafka=" + clusterId + "/topic=" + topicName
                              + "/config=cleanup.policy")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setName("cleanup.policy")
              .setValue("compact")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
              .setSynonyms(
                  Arrays.asList(
                      ConfigSynonymData.builder()
                          .setName("cleanup.policy")
                          .setValue("compact")
                          .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
                          .build(),
                      ConfigSynonymData.builder()
                          .setName("log.cleanup.policy")
                          .setValue("delete")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());

  Response existingGetTopicConfigResponse =
      request("/v3/clusters/" + clusterId + "/topics/" + topicName + "/configs/cleanup.policy")
          .accept(MediaType.APPLICATION_JSON)
          .get();
  assertEquals(Status.OK.getStatusCode(), existingGetTopicConfigResponse.getStatus());

  GetTopicConfigResponse actualGetTopicConfigResponse =
      existingGetTopicConfigResponse.readEntity(GetTopicConfigResponse.class);
  assertEquals(expectedExistingGetTopicConfigResponse, actualGetTopicConfigResponse);

  Response deleteTopicResponse =
      request("/v3/clusters/" + clusterId + "/topics/" + topicName)
          .accept(MediaType.APPLICATION_JSON)
          .delete();
  assertEquals(Status.NO_CONTENT.getStatusCode(), deleteTopicResponse.getStatus());
  assertTrue(deleteTopicResponse.readEntity(String.class).isEmpty());

  testWithRetry(
      () ->
          assertFalse(
              getTopicNames().contains(topicName),
              String.format("Topic names should not contain %s after its deletion", topicName)));

  Response deletedGetTopicResponse =
      request("/v3/clusters/" + clusterId + "/topics/" + topicName)
          .accept(MediaType.APPLICATION_JSON)
          .get();
  assertEquals(Status.NOT_FOUND.getStatusCode(), deletedGetTopicResponse.getStatus());
}
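For reference, the same v3 create-topic request can be issued outside the test harness with a plain JAX-RS client. This is a minimal sketch, not part of the project: it assumes a JAX-RS client implementation (e.g. Jersey) on the classpath and the javax.ws.rs namespace used by these tests, and the base URL and cluster id are placeholders (the test obtains them from restConnect and getClusterId()). The request body mirrors the JSON posted above.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class CreateTopicSketch {
  public static void main(String[] args) {
    String baseUrl = "http://localhost:8082"; // placeholder REST Proxy address
    String clusterId = "<cluster-id>";        // placeholder; the test uses getClusterId()
    Client client = ClientBuilder.newClient();
    try {
      Response response =
          client
              .target(baseUrl + "/v3/clusters/" + clusterId + "/topics")
              .request(MediaType.APPLICATION_JSON)
              .post(
                  Entity.entity(
                      "{\"topic_name\":\"topic-4\",\"partitions_count\":1,"
                          + "\"configs\":[{\"name\":\"cleanup.policy\",\"value\":\"compact\"}]}",
                      MediaType.APPLICATION_JSON));
      // The integration test above expects 201 Created for a newly created topic.
      System.out.println("HTTP status: " + response.getStatus());
    } finally {
      client.close();
    }
  }
}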
Use of io.confluent.kafkarest.entities.v3.GetTopicResponse in project kafka-rest by confluentinc.
Class MetadataAPITest, method testTopicsList.
/* This should work, but due to the lack of timeouts in ZkClient, if ZK is down some calls
* will just block forever, see https://issues.apache.org/jira/browse/KAFKA-1907. We should
* reenable this once we can apply timeouts to ZK operations.
*/
/*
@Test
public void testZkFailure() throws InterruptedException {
// Kill ZK so the request will generate an error.
zookeeper.shutdown();
// Since this is handled via an ExceptionMapper, testing one endpoint is good enough
Response response = request("/brokers").get();
assertErrorResponse(Response.Status.INTERNAL_SERVER_ERROR, response,
Errors.ZOOKEEPER_ERROR_ERROR_CODE, Errors.ZOOKEEPER_ERROR_MESSAGE,
Versions.KAFKA_MOST_SPECIFIC_DEFAULT);
}
*/
@Test
public void testTopicsList() throws InterruptedException {
  // Listing
  testWithRetry(
      () -> {
        Response response = request("/topics").get();
        assertOKResponse(response, Versions.KAFKA_V2_JSON);
        final List<String> topicsResponse =
            tryReadEntityOrLog(response, new GenericType<List<String>>() {});
        assertEquals(Arrays.asList(topic1Name, topic2Name), topicsResponse);
      });

  // Get topic
  Response response1 = request("/topics/{topic}", "topic", topic1Name).get();
  assertOKResponse(response1, Versions.KAFKA_V2_JSON);
  final GetTopicResponse topic1Response = tryReadEntityOrLog(response1, GetTopicResponse.class);
  // Only verify some basic properties, because the exact values can vary with replica
  // assignment and leader election.
  assertEquals(topic1.getName(), topic1Response.getName());
  // The admin client also returns default configs, so configs are not asserted for now.
  // assertEquals(topic1.getConfigs(), topic1Response.getConfigs());
  assertEquals(topic1Partitions.size(), topic1Response.getPartitions().size());
  assertEquals(numReplicas, topic1Response.getPartitions().get(0).getReplicas().size());

  // Get invalid topic
  final Response invalidResponse = request("/topics/{topic}", "topic", "topicdoesntexist").get();
  assertErrorResponse(
      Response.Status.NOT_FOUND,
      invalidResponse,
      KafkaExceptionMapper.KAFKA_UNKNOWN_TOPIC_PARTITION_CODE,
      null,
      Versions.KAFKA_V2_JSON);
}
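The testWithRetry wrapper matters here because topic metadata only becomes visible once the brokers converge. As a rough illustration of the pattern (not the project's actual helper, whose timeout and back-off may differ), a retry loop over a block of assertions could look like this:

import java.time.Duration;

public final class RetrySketch {

  /** Re-runs a block of assertions until it passes or the timeout elapses. */
  public static void retryUntilSuccess(Runnable assertions, Duration timeout)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeout.toMillis();
    while (true) {
      try {
        assertions.run(); // throws AssertionError while the condition does not hold yet
        return;           // assertions passed
      } catch (AssertionError e) {
        if (System.currentTimeMillis() >= deadline) {
          throw e;        // give up and surface the last failure
        }
        Thread.sleep(100); // wait for cluster metadata to converge, then try again
      }
    }
  }
}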
Use of io.confluent.kafkarest.entities.v3.GetTopicResponse in project kafka-rest by confluentinc.
Class TopicsResource, method getTopic.
@GET
@Path("/{topic}")
@PerformanceMetric("topic.get+v2")
@ResourceName("api.v2.topics.get")
public void getTopic(
    @Suspended AsyncResponse asyncResponse, @PathParam("topic") String topicName) {
  TopicManager topicManager = topicManagerProvider.get();
  TopicConfigManager topicConfigManager = topicConfigManagerProvider.get();

  CompletableFuture<Topic> topicFuture =
      topicManager
          .getLocalTopic(topicName)
          .thenApply(topic -> topic.orElseThrow(Errors::topicNotFoundException));
  CompletableFuture<GetTopicResponse> response =
      topicFuture
          .thenCompose(
              topic -> topicConfigManager.listTopicConfigs(topic.getClusterId(), topicName))
          .thenCombine(topicFuture, (configs, topic) -> GetTopicResponse.fromTopic(topic, configs));

  AsyncResponses.asyncResume(asyncResponse, response);
}
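The interesting part of this handler is the future composition: the topic lookup is started once, the config listing is chained onto it with thenCompose, and thenCombine merges both results into the response. A minimal, self-contained sketch of that shape, with plain strings standing in for the real Topic and config entities, is:

import java.util.concurrent.CompletableFuture;

public class ComposeCombineSketch {
  public static void main(String[] args) {
    // Stand-in for topicManager.getLocalTopic(topicName) resolving to a topic.
    CompletableFuture<String> topicFuture = CompletableFuture.completedFuture("topic-1");

    CompletableFuture<String> responseFuture =
        topicFuture
            // Chain a dependent lookup, like listTopicConfigs(clusterId, topicName).
            .thenCompose(topic -> CompletableFuture.completedFuture("configs-of-" + topic))
            // Merge the configs with the original topic, like GetTopicResponse.fromTopic(...).
            .thenCombine(topicFuture, (configs, topic) -> topic + " + " + configs);

    System.out.println(responseFuture.join()); // prints "topic-1 + configs-of-topic-1"
  }
}

Keeping a reference to topicFuture and combining it back in at the end avoids issuing a second lookup for the same topic.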
Use of io.confluent.kafkarest.entities.v3.GetTopicResponse in project kafka-rest by confluentinc.
Class TopicsResourceTest, method testGetTopic.
@Test
public void testGetTopic() {
  expect(topicManager.getLocalTopic(TOPIC_1.getName()))
      .andReturn(completedFuture(Optional.of(TOPIC_1)));
  expect(topicConfigManager.listTopicConfigs(CLUSTER_ID, TOPIC_1.getName()))
      .andReturn(completedFuture(Arrays.asList(CONFIG_1, CONFIG_2, CONFIG_3)));
  replay(topicManager, topicConfigManager);

  Response response1 = request("/topics/" + TOPIC_1.getName(), Versions.KAFKA_V2_JSON).get();
  assertOKResponse(response1, Versions.KAFKA_V2_JSON);

  final GetTopicResponse topicResponse1 =
      TestUtils.tryReadEntityOrLog(response1, GetTopicResponse.class);
  assertEquals(
      GetTopicResponse.fromTopic(TOPIC_1, Arrays.asList(CONFIG_1, CONFIG_2, CONFIG_3)),
      topicResponse1);
}
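This unit test relies on EasyMock's record/replay style: expectations are recorded with expect(...).andReturn(...), and replay(...) switches the mocks into replay mode before the request is made (a verify step is not shown in this excerpt). A stripped-down sketch of that lifecycle, using a hypothetical collaborator interface purely to keep the example self-contained, assuming standard EasyMock:

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

public class EasyMockSketch {

  // Hypothetical collaborator, not part of kafka-rest.
  interface TopicLookup {
    String findTopic(String name);
  }

  public static void main(String[] args) {
    TopicLookup lookup = createMock(TopicLookup.class);
    expect(lookup.findTopic("topic-1")).andReturn("topic-1-metadata"); // record the expectation
    replay(lookup);                                                    // switch to replay mode

    String result = lookup.findTopic("topic-1"); // normally exercised by the code under test
    System.out.println(result);

    verify(lookup); // confirm the expected call actually happened
  }
}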
Use of io.confluent.kafkarest.entities.v3.GetTopicResponse in project kafka-rest by confluentinc.
Class TopicsResourceIntegrationTest, method getTopic_existingClusterExistingTopic_returnsTopic.
@Test
public void getTopic_existingClusterExistingTopic_returnsTopic() {
  String baseUrl = restConnect;
  String clusterId = getClusterId();

  GetTopicResponse expected =
      GetTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + "/v3/clusters/" + clusterId + "/topics/" + TOPIC_1)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + TOPIC_1)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(TOPIC_1)
              .setInternal(false)
              .setReplicationFactor(1)
              .setPartitionsCount(1)
              .setPartitions(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + TOPIC_1 + "/partitions"))
              .setConfigs(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + TOPIC_1 + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + "/v3/clusters/" + clusterId + "/topics/" + TOPIC_1
                          + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());

  Response response =
      request("/v3/clusters/" + clusterId + "/topics/" + TOPIC_1)
          .accept(MediaType.APPLICATION_JSON)
          .get();
  assertEquals(Status.OK.getStatusCode(), response.getStatus());

  GetTopicResponse actual = response.readEntity(GetTopicResponse.class);
  assertEquals(expected, actual);
}
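The same GET call can be reproduced outside the test with a plain JAX-RS client. A minimal sketch under the same assumptions as the create-topic sketch above (placeholder base URL, cluster id, and topic name), reading the raw JSON payload as a String instead of deserializing it into GetTopicResponse:

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class GetTopicSketch {
  public static void main(String[] args) {
    String baseUrl = "http://localhost:8082"; // placeholder REST Proxy address
    String clusterId = "<cluster-id>";        // placeholder; the test uses getClusterId()
    String topicName = "topic-1";             // placeholder topic name
    Client client = ClientBuilder.newClient();
    try {
      Response response =
          client
              .target(baseUrl + "/v3/clusters/" + clusterId + "/topics/" + topicName)
              .request(MediaType.APPLICATION_JSON)
              .get();
      System.out.println("HTTP status: " + response.getStatus()); // 200 OK if the topic exists
      System.out.println(response.readEntity(String.class));      // raw JSON topic representation
    } finally {
      client.close();
    }
  }
}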