Usage example of io.confluent.kafkarest.entities.v3.CreateTopicResponse in the kafka-rest project by Confluent, taken from the class TopicsResourceIntegrationTest, method createAndDelete_nonExisting_returnsNotFoundCreatedAndNotFound.
@Test
public void createAndDelete_nonExisting_returnsNotFoundCreatedAndNotFound() {
  // Exercises the full topic lifecycle over the v3 REST API:
  // 404 before creation, 201 on create, 200 once the topic is visible,
  // the creation-time config readable, 204 on delete, and 404 afterwards.
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  String topicName = "topic-4";
  String topicPath = "/v3/clusters/" + clusterId + "/topics/" + topicName;

  // Before creation the topic lookup must fail.
  Response missingTopicResponse = request(topicPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.NOT_FOUND.getStatusCode(), missingTopicResponse.getStatus());

  // The create response carries placeholder counts (0 replicas / 0 partitions):
  // the server answers before the topic metadata has materialized.
  CreateTopicResponse expectedCreateTopicResponse =
      CreateTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + topicName)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setInternal(false)
              .setReplicationFactor(0)
              .setPartitionsCount(0)
              .setPartitions(Resource.Relationship.create(baseUrl + topicPath + "/partitions"))
              .setConfigs(Resource.Relationship.create(baseUrl + topicPath + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + topicPath + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());

  Response createTopicResponse =
      request("/v3/clusters/" + clusterId + "/topics")
          .accept(MediaType.APPLICATION_JSON)
          .post(
              Entity.entity(
                  "{\"topic_name\":\"" + topicName + "\",\"partitions_count\":1,"
                      + "\"configs\":[{\"name\":\"cleanup.policy\",\"value\":\"compact\"}]}",
                  MediaType.APPLICATION_JSON));
  assertEquals(Status.CREATED.getStatusCode(), createTopicResponse.getStatus());
  assertEquals(
      expectedCreateTopicResponse, createTopicResponse.readEntity(CreateTopicResponse.class));

  testWithRetry(
      () ->
          assertTrue(
              getTopicNames().contains(topicName),
              String.format("Topic names should contain %s after its creation", topicName)));

  // Once the topic is visible, GET reports the real replication factor and
  // partition count.
  GetTopicResponse expectedExistingGetTopicResponse =
      GetTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + topicName)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setInternal(false)
              .setReplicationFactor(2)
              .setPartitionsCount(1)
              .setPartitions(Resource.Relationship.create(baseUrl + topicPath + "/partitions"))
              .setConfigs(Resource.Relationship.create(baseUrl + topicPath + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + topicPath + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());
  testWithRetry(
      () -> {
        Response existingTopicResponse =
            request(topicPath).accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.OK.getStatusCode(), existingTopicResponse.getStatus());
        assertEquals(
            expectedExistingGetTopicResponse,
            existingTopicResponse.readEntity(GetTopicResponse.class));
      });

  // The config supplied at creation time must be readable as a dynamic topic
  // config, with the broker default (log.cleanup.policy=delete) as a synonym.
  GetTopicConfigResponse expectedExistingGetTopicConfigResponse =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath + "/configs/cleanup.policy")
                      .setResourceName(
                          "crn:///kafka=" + clusterId + "/topic=" + topicName
                              + "/config=cleanup.policy")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setName("cleanup.policy")
              .setValue("compact")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
              .setSynonyms(
                  Arrays.asList(
                      ConfigSynonymData.builder()
                          .setName("cleanup.policy")
                          .setValue("compact")
                          .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
                          .build(),
                      ConfigSynonymData.builder()
                          .setName("log.cleanup.policy")
                          .setValue("delete")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());
  Response existingGetTopicConfigResponse =
      request(topicPath + "/configs/cleanup.policy").accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.OK.getStatusCode(), existingGetTopicConfigResponse.getStatus());
  assertEquals(
      expectedExistingGetTopicConfigResponse,
      existingGetTopicConfigResponse.readEntity(GetTopicConfigResponse.class));

  // Delete returns 204 with an empty body.
  Response deleteTopicResponse = request(topicPath).accept(MediaType.APPLICATION_JSON).delete();
  assertEquals(Status.NO_CONTENT.getStatusCode(), deleteTopicResponse.getStatus());
  assertTrue(deleteTopicResponse.readEntity(String.class).isEmpty());

  testWithRetry(
      () ->
          assertFalse(
              getTopicNames().contains(topicName),
              String.format("Topic names should not contain %s after its deletion", topicName)));

  // After deletion the lookup must fail again.
  Response deletedGetTopicResponse = request(topicPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.NOT_FOUND.getStatusCode(), deletedGetTopicResponse.getStatus());
}
Usage example of io.confluent.kafkarest.entities.v3.CreateTopicResponse in the kafka-rest project by Confluent, taken from the class TopicsResourceTest, method createTopic_nonExistingTopic_defaultPartitionsCount_createsTopic.
@Test
public void createTopic_nonExistingTopic_defaultPartitionsCount_createsTopic() {
  // When the request omits partitions_count, the resource must forward
  // Optional.empty() to the manager so the broker default applies.
  expect(
          topicManager.createTopic(
              CLUSTER_ID,
              TOPIC_1.getName(),
              /* partitionsCount= */ Optional.empty(),
              Optional.of(TOPIC_1.getReplicationFactor()),
              /* replicasAssignments= */ Collections.emptyMap(),
              singletonMap("cleanup.policy", Optional.of("compact"))))
      .andReturn(completedFuture(null));
  replay(topicManager);

  FakeAsyncResponse response = new FakeAsyncResponse();
  CreateTopicRequest createRequest =
      CreateTopicRequest.builder()
          .setTopicName(TOPIC_1.getName())
          .setReplicationFactor(TOPIC_1.getReplicationFactor())
          .setConfigs(
              singletonList(CreateTopicRequest.ConfigEntry.create("cleanup.policy", "compact")))
          .build();
  topicsResource.createTopic(response, TOPIC_1.getClusterId(), createRequest);

  // partitionsCount in the echoed TopicData is 0: metadata is unknown at create time.
  assertEquals(
      CreateTopicResponse.create(newTopicData("topic-1", false, 3, 0)), response.getValue());
}
Usage example of io.confluent.kafkarest.entities.v3.CreateTopicResponse in the kafka-rest project by Confluent, taken from the class TopicsResourceTest, method createTopic_nonExistingTopic_createsTopic.
@Test
public void createTopic_nonExistingTopic_createsTopic() {
  // An explicit partitions_count must be forwarded to the manager as-is.
  expect(
          topicManager.createTopic(
              CLUSTER_ID,
              TOPIC_1.getName(),
              Optional.of(TOPIC_1.getPartitions().size()),
              Optional.of(TOPIC_1.getReplicationFactor()),
              /* replicasAssignments= */ Collections.emptyMap(),
              singletonMap("cleanup.policy", Optional.of("compact"))))
      .andReturn(completedFuture(null));
  replay(topicManager);

  FakeAsyncResponse response = new FakeAsyncResponse();
  CreateTopicRequest createRequest =
      CreateTopicRequest.builder()
          .setTopicName(TOPIC_1.getName())
          .setPartitionsCount(TOPIC_1.getPartitions().size())
          .setReplicationFactor(TOPIC_1.getReplicationFactor())
          .setConfigs(
              singletonList(CreateTopicRequest.ConfigEntry.create("cleanup.policy", "compact")))
          .build();
  topicsResource.createTopic(response, TOPIC_1.getClusterId(), createRequest);

  // partitionsCount in the echoed TopicData is 0: metadata is unknown at create time.
  assertEquals(
      CreateTopicResponse.create(newTopicData("topic-1", false, 3, 0)), response.getValue());
}
Usage example of io.confluent.kafkarest.entities.v3.CreateTopicResponse in the kafka-rest project by Confluent, taken from the class TopicsResource, method createTopic.
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@PerformanceMetric("v3.topics.create")
@ResourceName("api.v3.topics.create")
public void createTopic(
    @Suspended AsyncResponse asyncResponse,
    @PathParam("clusterId") String clusterId,
    @Valid CreateTopicRequest request) {
  // Creates the topic asynchronously and answers 201 Created with a TopicData
  // body describing the (not yet fully materialized) topic.
  String topicName = request.getTopicName();
  Optional<Integer> requestedPartitions = request.getPartitionsCount();
  Optional<Short> requestedReplication = request.getReplicationFactor();
  Map<Integer, List<Integer>> assignments = request.getReplicasAssignments();
  Map<String, Optional<String>> configOverrides =
      request.getConfigs().stream()
          .collect(Collectors.toMap(ConfigEntry::getName, ConfigEntry::getValue));

  // We have no way of knowing the default replication factor in the Kafka broker. Also in case
  // of explicitly specified partition-to-replicas assignments, all partitions should have the
  // same number of replicas, so the first assignment's size stands in for all of them.
  short assumedReplicationFactor;
  if (requestedReplication.isPresent()) {
    assumedReplicationFactor = requestedReplication.get();
  } else if (assignments.isEmpty()) {
    assumedReplicationFactor = 0;
  } else {
    assumedReplicationFactor = (short) assignments.values().iterator().next().size();
  }

  // Response body built up-front: the partition list is empty because the
  // broker has not been asked for the topic's metadata.
  TopicData topicData =
      toTopicData(
          Topic.create(
              clusterId,
              topicName,
              /* partitions= */ emptyList(),
              assumedReplicationFactor,
              /* isInternal= */ false,
              /* authorizedOperations= */ emptySet()));

  CompletableFuture<CreateTopicResponse> response =
      topicManager
          .get()
          .createTopic(
              clusterId,
              topicName,
              requestedPartitions,
              requestedReplication,
              assignments,
              configOverrides)
          .thenApply(none -> CreateTopicResponse.create(topicData));

  AsyncResponseBuilder.from(
          Response.status(Status.CREATED)
              .location(URI.create(topicData.getMetadata().getSelf())))
      .entity(response)
      .asyncResume(asyncResponse);
}
Usage example of io.confluent.kafkarest.entities.v3.CreateTopicResponse in the kafka-rest project by Confluent, taken from the class TopicsResourceIntegrationTest, method createTopic_nonExistingTopic_returnsCreatedTopic.
@Test
public void createTopic_nonExistingTopic_returnsCreatedTopic() {
  // Creating a topic with an explicit replication_factor must return 201 and
  // echo that factor in the response body; the partition count is reported as
  // 0 because the server answers before the topic metadata materializes.
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  String topicName = "topic-4";
  String topicPath = "/v3/clusters/" + clusterId + "/topics/" + topicName;

  CreateTopicResponse expected =
      CreateTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + topicName)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setInternal(false)
              .setReplicationFactor(1)
              .setPartitionsCount(0)
              .setPartitions(Resource.Relationship.create(baseUrl + topicPath + "/partitions"))
              .setConfigs(Resource.Relationship.create(baseUrl + topicPath + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + topicPath + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());

  Response response =
      request("/v3/clusters/" + clusterId + "/topics")
          .accept(MediaType.APPLICATION_JSON)
          .post(
              Entity.entity(
                  "{\"topic_name\":\"" + topicName + "\",\"partitions_count\":1,"
                      + "\"replication_factor\":1}",
                  MediaType.APPLICATION_JSON));
  assertEquals(Status.CREATED.getStatusCode(), response.getStatus());
  assertEquals(expected, response.readEntity(CreateTopicResponse.class));

  testWithRetry(
      () ->
          assertTrue(
              getTopicNames().contains(topicName),
              String.format("Topic names should contain %s after its creation", topicName)));
}
Aggregations