Example usage of io.confluent.kafkarest.entities.v3.GetTopicConfigResponse in the kafka-rest project by confluentinc, taken from the class TopicsResourceIntegrationTest, method createAndDelete_nonExisting_returnsNotFoundCreatedAndNotFound.
@Test
public void createAndDelete_nonExisting_returnsNotFoundCreatedAndNotFound() {
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  String topicName = "topic-4";
  String topicPath = "/v3/clusters/" + clusterId + "/topics/" + topicName;

  // Reading the topic before it exists must yield 404.
  Response getBeforeCreate = request(topicPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.NOT_FOUND.getStatusCode(), getBeforeCreate.getStatus());

  // NOTE(review): the create response is expected with replicationFactor=0 and
  // partitionsCount=0 while the later GET expects 2 and 1 — presumably the create
  // path does not populate these fields; confirm against the resource implementation.
  CreateTopicResponse expectedCreated =
      CreateTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + topicName)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setInternal(false)
              .setReplicationFactor(0)
              .setPartitionsCount(0)
              .setPartitions(Resource.Relationship.create(baseUrl + topicPath + "/partitions"))
              .setConfigs(Resource.Relationship.create(baseUrl + topicPath + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + topicPath + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());

  // Create the topic with one partition and a dynamic cleanup.policy override.
  Response createResponse =
      request("/v3/clusters/" + clusterId + "/topics")
          .accept(MediaType.APPLICATION_JSON)
          .post(
              Entity.entity(
                  "{\"topic_name\":\"" + topicName + "\",\"partitions_count\":1,"
                      + "\"configs\":[{\"name\":\"cleanup.policy\",\"value\":\"compact\"}]}",
                  MediaType.APPLICATION_JSON));
  assertEquals(Status.CREATED.getStatusCode(), createResponse.getStatus());
  assertEquals(expectedCreated, createResponse.readEntity(CreateTopicResponse.class));

  // Topic creation is asynchronous: poll until it shows up in the topic list.
  testWithRetry(
      () ->
          assertTrue(
              getTopicNames().contains(topicName),
              String.format("Topic names should contain %s after its creation", topicName)));

  GetTopicResponse expectedExisting =
      GetTopicResponse.create(
          TopicData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath)
                      .setResourceName("crn:///kafka=" + clusterId + "/topic=" + topicName)
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setInternal(false)
              .setReplicationFactor(2)
              .setPartitionsCount(1)
              .setPartitions(Resource.Relationship.create(baseUrl + topicPath + "/partitions"))
              .setConfigs(Resource.Relationship.create(baseUrl + topicPath + "/configs"))
              .setPartitionReassignments(
                  Resource.Relationship.create(
                      baseUrl + topicPath + "/partitions/-/reassignment"))
              .setAuthorizedOperations(emptySet())
              .build());
  testWithRetry(
      () -> {
        Response getAfterCreate = request(topicPath).accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.OK.getStatusCode(), getAfterCreate.getStatus());
        assertEquals(expectedExisting, getAfterCreate.readEntity(GetTopicResponse.class));
      });

  // The config supplied at creation must be visible as a dynamic topic config.
  GetTopicConfigResponse expectedConfig =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath + "/configs/cleanup.policy")
                      .setResourceName(
                          "crn:///kafka="
                              + clusterId
                              + "/topic="
                              + topicName
                              + "/config=cleanup.policy")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(topicName)
              .setName("cleanup.policy")
              .setValue("compact")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
              .setSynonyms(
                  Arrays.asList(
                      ConfigSynonymData.builder()
                          .setName("cleanup.policy")
                          .setValue("compact")
                          .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
                          .build(),
                      ConfigSynonymData.builder()
                          .setName("log.cleanup.policy")
                          .setValue("delete")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());
  Response configResponse =
      request(topicPath + "/configs/cleanup.policy").accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.OK.getStatusCode(), configResponse.getStatus());
  assertEquals(expectedConfig, configResponse.readEntity(GetTopicConfigResponse.class));

  // Delete the topic; a successful delete is 204 with an empty body.
  Response deleteResponse = request(topicPath).accept(MediaType.APPLICATION_JSON).delete();
  assertEquals(Status.NO_CONTENT.getStatusCode(), deleteResponse.getStatus());
  assertTrue(deleteResponse.readEntity(String.class).isEmpty());

  // Deletion is also asynchronous: poll until the topic disappears, then expect 404.
  testWithRetry(
      () ->
          assertFalse(
              getTopicNames().contains(topicName),
              String.format("Topic names should not contain %s after its deletion", topicName)));
  Response getAfterDelete = request(topicPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.NOT_FOUND.getStatusCode(), getAfterDelete.getStatus());
}
Example usage of io.confluent.kafkarest.entities.v3.GetTopicConfigResponse in the kafka-rest project by confluentinc, taken from the class TopicConfigsResourceIntegrationTest, method getTopicConfig_existingConfig_returnsConfig.
@Test
public void getTopicConfig_existingConfig_returnsConfig() {
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  String configPath =
      "/v3/clusters/" + clusterId + "/topics/" + TOPIC_1 + "/configs/cleanup.policy";

  // cleanup.policy has not been overridden, so it must come back as the broker
  // default ("delete") sourced from DEFAULT_CONFIG with the log.cleanup.policy synonym.
  GetTopicConfigResponse expected =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + configPath)
                      .setResourceName(
                          "crn:///kafka="
                              + clusterId
                              + "/topic="
                              + TOPIC_1
                              + "/config=cleanup.policy")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(TOPIC_1)
              .setName("cleanup.policy")
              .setValue("delete")
              .setDefault(true)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DEFAULT_CONFIG)
              .setSynonyms(
                  singletonList(
                      ConfigSynonymData.builder()
                          .setName("log.cleanup.policy")
                          .setValue("delete")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());

  Response response = request(configPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.OK.getStatusCode(), response.getStatus());
  assertEquals(expected, response.readEntity(GetTopicConfigResponse.class));
}
Example usage of io.confluent.kafkarest.entities.v3.GetTopicConfigResponse in the kafka-rest project by confluentinc, taken from the class TopicConfigsResourceIntegrationTest, method alterConfigBatch_withExistingConfig.
@Test
public void alterConfigBatch_withExistingConfig() {
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  String topicPath = "/v3/clusters/" + clusterId + "/topics/" + TOPIC_1;

  // Alter two configs in a single batch request.
  Response alterResponse =
      request(topicPath + "/configs:alter")
          .accept(MediaType.APPLICATION_JSON)
          .post(
              Entity.entity(
                  "{\"data\":["
                      + "{\"name\": \"cleanup.policy\",\"value\":\"compact\"},"
                      + "{\"name\": \"compression.type\",\"value\":\"gzip\"}]}",
                  MediaType.APPLICATION_JSON));
  assertEquals(Status.NO_CONTENT.getStatusCode(), alterResponse.getStatus());

  // Both configs must now read back as dynamic topic configs, each with the
  // dynamic value first and the broker default as the second synonym.
  GetTopicConfigResponse expectedCleanupPolicy =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath + "/configs/cleanup.policy")
                      .setResourceName(
                          "crn:///kafka="
                              + clusterId
                              + "/topic="
                              + TOPIC_1
                              + "/config=cleanup.policy")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(TOPIC_1)
              .setName("cleanup.policy")
              .setValue("compact")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
              .setSynonyms(
                  Arrays.asList(
                      ConfigSynonymData.builder()
                          .setName("cleanup.policy")
                          .setValue("compact")
                          .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
                          .build(),
                      ConfigSynonymData.builder()
                          .setName("log.cleanup.policy")
                          .setValue("delete")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());
  GetTopicConfigResponse expectedCompressionType =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + topicPath + "/configs/compression.type")
                      .setResourceName(
                          "crn:///kafka="
                              + clusterId
                              + "/topic="
                              + TOPIC_1
                              + "/config=compression.type")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(TOPIC_1)
              .setName("compression.type")
              .setValue("gzip")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
              .setSynonyms(
                  Arrays.asList(
                      ConfigSynonymData.builder()
                          .setName("compression.type")
                          .setValue("gzip")
                          .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
                          .build(),
                      ConfigSynonymData.builder()
                          .setName("compression.type")
                          .setValue("producer")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());

  // Config propagation is asynchronous: retry each read until it matches.
  testWithRetry(
      () -> {
        Response readBack =
            request(topicPath + "/configs/cleanup.policy")
                .accept(MediaType.APPLICATION_JSON)
                .get();
        assertEquals(Status.OK.getStatusCode(), readBack.getStatus());
        assertEquals(expectedCleanupPolicy, readBack.readEntity(GetTopicConfigResponse.class));
      });
  testWithRetry(
      () -> {
        Response readBack =
            request(topicPath + "/configs/compression.type")
                .accept(MediaType.APPLICATION_JSON)
                .get();
        assertEquals(Status.OK.getStatusCode(), readBack.getStatus());
        assertEquals(expectedCompressionType, readBack.readEntity(GetTopicConfigResponse.class));
      });
}
Example usage of io.confluent.kafkarest.entities.v3.GetTopicConfigResponse in the kafka-rest project by confluentinc, taken from the class TopicConfigsResourceIntegrationTest, method getUpdateReset_withExistingConfig.
@Test
public void getUpdateReset_withExistingConfig() {
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  String configPath =
      "/v3/clusters/" + clusterId + "/topics/" + TOPIC_1 + "/configs/cleanup.policy";

  // Expected state when cleanup.policy is NOT overridden: broker default "delete".
  // The same value is expected both before the update and after the reset.
  GetTopicConfigResponse expectedDefault =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + configPath)
                      .setResourceName(
                          "crn:///kafka="
                              + clusterId
                              + "/topic="
                              + TOPIC_1
                              + "/config=cleanup.policy")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(TOPIC_1)
              .setName("cleanup.policy")
              .setValue("delete")
              .setDefault(true)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DEFAULT_CONFIG)
              .setSynonyms(
                  singletonList(
                      ConfigSynonymData.builder()
                          .setName("log.cleanup.policy")
                          .setValue("delete")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());

  // 1. GET: config starts out at the broker default.
  Response readBeforeUpdate = request(configPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.OK.getStatusCode(), readBeforeUpdate.getStatus());
  assertEquals(expectedDefault, readBeforeUpdate.readEntity(GetTopicConfigResponse.class));

  // 2. PUT: override cleanup.policy to "compact".
  Response updateResponse =
      request(configPath)
          .accept(MediaType.APPLICATION_JSON)
          .put(Entity.entity("{\"value\":\"compact\"}", MediaType.APPLICATION_JSON));
  assertEquals(Status.NO_CONTENT.getStatusCode(), updateResponse.getStatus());

  // After the update the config is a dynamic topic config; the dynamic value is
  // listed first among the synonyms, followed by the broker default.
  GetTopicConfigResponse expectedAfterUpdate =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + configPath)
                      .setResourceName(
                          "crn:///kafka="
                              + clusterId
                              + "/topic="
                              + TOPIC_1
                              + "/config=cleanup.policy")
                      .build())
              .setClusterId(clusterId)
              .setTopicName(TOPIC_1)
              .setName("cleanup.policy")
              .setValue("compact")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
              .setSynonyms(
                  Arrays.asList(
                      ConfigSynonymData.builder()
                          .setName("cleanup.policy")
                          .setValue("compact")
                          .setSource(ConfigSource.DYNAMIC_TOPIC_CONFIG)
                          .build(),
                      ConfigSynonymData.builder()
                          .setName("log.cleanup.policy")
                          .setValue("delete")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());
  testWithRetry(
      () -> {
        Response readAfterUpdate = request(configPath).accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.OK.getStatusCode(), readAfterUpdate.getStatus());
        assertEquals(expectedAfterUpdate, readAfterUpdate.readEntity(GetTopicConfigResponse.class));
      });

  // 3. DELETE: reset the override and verify the config returns to the default.
  Response resetResponse = request(configPath).accept(MediaType.APPLICATION_JSON).delete();
  assertEquals(Status.NO_CONTENT.getStatusCode(), resetResponse.getStatus());
  testWithRetry(
      () -> {
        Response readAfterReset = request(configPath).accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.OK.getStatusCode(), readAfterReset.getStatus());
        assertEquals(expectedDefault, readAfterReset.readEntity(GetTopicConfigResponse.class));
      });
}
Example usage of io.confluent.kafkarest.entities.v3.GetTopicConfigResponse in the kafka-rest project by confluentinc, taken from the class TopicConfigsResourceTest, method getTopicConfig_existingConfig_returnsConfig.
@Test
public void getTopicConfig_existingConfig_returnsConfig() {
  // Stub the manager to resolve CONFIG_1 for the requested cluster/topic/config.
  expect(topicConfigManager.getTopicConfig(CLUSTER_ID, TOPIC_NAME, CONFIG_1.getName()))
      .andReturn(completedFuture(Optional.of(CONFIG_1)));
  replay(topicConfigManager);

  FakeAsyncResponse asyncResponse = new FakeAsyncResponse();
  topicConfigsResource.getTopicConfig(asyncResponse, CLUSTER_ID, TOPIC_NAME, CONFIG_1.getName());

  // The resource must wrap CONFIG_1 into a GetTopicConfigResponse with self/CRN links.
  GetTopicConfigResponse expected =
      GetTopicConfigResponse.create(
          TopicConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf("/v3/clusters/cluster-1/topics/topic-1/configs/config-1")
                      .setResourceName("crn:///kafka=cluster-1/topic=topic-1/config=config-1")
                      .build())
              .setClusterId(CLUSTER_ID)
              .setTopicName(TOPIC_NAME)
              .setName(CONFIG_1.getName())
              .setValue(CONFIG_1.getValue())
              .setDefault(CONFIG_1.isDefault())
              .setReadOnly(CONFIG_1.isReadOnly())
              .setSensitive(CONFIG_1.isSensitive())
              .setSource(CONFIG_1.getSource())
              .setSynonyms(
                  CONFIG_1.getSynonyms().stream()
                      .map(ConfigSynonymData::fromConfigSynonym)
                      .collect(Collectors.toList()))
              .build());
  assertEquals(expected, asyncResponse.getValue());
}
Aggregations