Example usage of io.confluent.kafkarest.entities.v3.GetClusterConfigResponse in the kafka-rest project by confluentinc.
From the class ClusterConfigsResourceIntegrationTest, method updateAndReset_existingConfig_returnsDefaultUpdatedAndDefaultAgain.
@Test
public void updateAndReset_existingConfig_returnsDefaultUpdatedAndDefaultAgain() {
  // Walks compression.type through the full dynamic-config lifecycle:
  // (1) no cluster-wide override exists yet, (2) a PUT sets it to "gzip",
  // (3) a DELETE resets it back to the static default ("producer").
  // Both the cluster-config and per-broker endpoints are verified at each step.
  String baseUrl = restConnect;
  String clusterId = getClusterId();
  int brokerId = getBrokers().get(0).id();

  String clusterConfigPath = "/v3/clusters/" + clusterId + "/broker-configs/compression.type";
  String brokerConfigPath =
      "/v3/clusters/" + clusterId + "/brokers/" + brokerId + "/configs/compression.type";
  String brokerConfigCrn =
      "crn:///kafka=" + clusterId + "/broker=" + brokerId + "/config=compression.type";

  // No dynamic cluster-wide config exists yet, so the cluster-config endpoint 404s.
  Response initialClusterConfigResponse =
      request(clusterConfigPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.NOT_FOUND.getStatusCode(), initialClusterConfigResponse.getStatus());

  // The per-broker view reports the static default ("producer") both before the
  // update and after the final reset, so the expectation is built once and reused.
  GetBrokerConfigResponse expectedDefaultBrokerConfig =
      GetBrokerConfigResponse.create(
          BrokerConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + brokerConfigPath)
                      .setResourceName(brokerConfigCrn)
                      .build())
              .setClusterId(clusterId)
              .setBrokerId(brokerId)
              .setName("compression.type")
              .setValue("producer")
              .setDefault(true)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DEFAULT_CONFIG)
              .setSynonyms(
                  singletonList(
                      ConfigSynonymData.builder()
                          .setName("compression.type")
                          .setValue("producer")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());

  Response initialBrokerConfigResponse =
      request(brokerConfigPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.OK.getStatusCode(), initialBrokerConfigResponse.getStatus());
  assertEquals(
      expectedDefaultBrokerConfig,
      initialBrokerConfigResponse.readEntity(GetBrokerConfigResponse.class));

  // Apply a cluster-wide dynamic override: compression.type=gzip.
  Response updateResponse =
      request(clusterConfigPath)
          .accept(MediaType.APPLICATION_JSON)
          .put(Entity.entity("{\"value\":\"gzip\"}", MediaType.APPLICATION_JSON));
  assertEquals(Status.NO_CONTENT.getStatusCode(), updateResponse.getStatus());

  GetClusterConfigResponse expectedUpdatedClusterConfig =
      GetClusterConfigResponse.create(
          ClusterConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + clusterConfigPath)
                      .setResourceName(
                          "crn:///kafka=" + clusterId + "/broker-config=compression.type")
                      .build())
              .setClusterId(clusterId)
              .setConfigType(ClusterConfig.Type.BROKER)
              .setName("compression.type")
              .setValue("gzip")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG)
              .setSynonyms(
                  singletonList(
                      ConfigSynonymData.builder()
                          .setName("compression.type")
                          .setValue("gzip")
                          .setSource(ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG)
                          .build()))
              .build());

  // Config propagation is asynchronous; poll until the new value becomes visible.
  testWithRetry(
      () -> {
        Response updatedClusterConfigResponse =
            request(clusterConfigPath).accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.OK.getStatusCode(), updatedClusterConfigResponse.getStatus());
        assertEquals(
            expectedUpdatedClusterConfig,
            updatedClusterConfigResponse.readEntity(GetClusterConfigResponse.class));
      });

  // The per-broker view now reports "gzip", with the dynamic value listed ahead
  // of the static default in the synonym chain.
  GetBrokerConfigResponse expectedUpdatedBrokerConfig =
      GetBrokerConfigResponse.create(
          BrokerConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf(baseUrl + brokerConfigPath)
                      .setResourceName(brokerConfigCrn)
                      .build())
              .setClusterId(clusterId)
              .setBrokerId(brokerId)
              .setName("compression.type")
              .setValue("gzip")
              .setDefault(false)
              .setReadOnly(false)
              .setSensitive(false)
              .setSource(ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG)
              .setSynonyms(
                  Arrays.asList(
                      ConfigSynonymData.builder()
                          .setName("compression.type")
                          .setValue("gzip")
                          .setSource(ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG)
                          .build(),
                      ConfigSynonymData.builder()
                          .setName("compression.type")
                          .setValue("producer")
                          .setSource(ConfigSource.DEFAULT_CONFIG)
                          .build()))
              .build());

  Response updatedBrokerConfigResponse =
      request(brokerConfigPath).accept(MediaType.APPLICATION_JSON).get();
  assertEquals(Status.OK.getStatusCode(), updatedBrokerConfigResponse.getStatus());
  assertEquals(
      expectedUpdatedBrokerConfig,
      updatedBrokerConfigResponse.readEntity(GetBrokerConfigResponse.class));

  // Remove the override; the cluster-config resource should disappear again.
  Response resetResponse =
      request(clusterConfigPath).accept(MediaType.APPLICATION_JSON).delete();
  assertEquals(Status.NO_CONTENT.getStatusCode(), resetResponse.getStatus());

  testWithRetry(
      () -> {
        Response resetClusterConfigResponse =
            request(clusterConfigPath).accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.NOT_FOUND.getStatusCode(), resetClusterConfigResponse.getStatus());
      });

  // And the per-broker view falls back to the static default.
  testWithRetry(
      () -> {
        Response resetBrokerConfigResponse =
            request(brokerConfigPath).accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.OK.getStatusCode(), resetBrokerConfigResponse.getStatus());
        assertEquals(
            expectedDefaultBrokerConfig,
            resetBrokerConfigResponse.readEntity(GetBrokerConfigResponse.class));
      });
}
Example usage of io.confluent.kafkarest.entities.v3.GetClusterConfigResponse in the kafka-rest project by confluentinc.
From the class ClusterConfigResourceTest, method getClusterConfig_existingConfig_returnsConfig.
@Test
public void getClusterConfig_existingConfig_returnsConfig() {
  // Stub the manager to resolve CONFIG_1; the resource should then answer with a
  // GetClusterConfigResponse mirroring that config's fields and synonyms.
  expect(
          clusterConfigManager.getClusterConfig(
              CLUSTER_ID, ClusterConfig.Type.BROKER, CONFIG_1.getName()))
      .andReturn(completedFuture(Optional.of(CONFIG_1)));
  replay(clusterConfigManager);

  FakeAsyncResponse asyncResponse = new FakeAsyncResponse();
  clusterConfigsResource.getClusterConfig(
      asyncResponse, CLUSTER_ID, ClusterConfig.Type.BROKER, CONFIG_1.getName());

  GetClusterConfigResponse expectedResponse =
      GetClusterConfigResponse.create(
          ClusterConfigData.builder()
              .setMetadata(
                  Resource.Metadata.builder()
                      .setSelf("/v3/clusters/cluster-1/broker-configs/config-1")
                      .setResourceName("crn:///kafka=cluster-1/broker-config=config-1")
                      .build())
              .setClusterId(CLUSTER_ID)
              .setConfigType(ClusterConfig.Type.BROKER)
              .setName(CONFIG_1.getName())
              .setValue(CONFIG_1.getValue())
              .setDefault(CONFIG_1.isDefault())
              .setReadOnly(CONFIG_1.isReadOnly())
              .setSensitive(CONFIG_1.isSensitive())
              .setSource(CONFIG_1.getSource())
              .setSynonyms(
                  CONFIG_1.getSynonyms().stream()
                      .map(ConfigSynonymData::fromConfigSynonym)
                      .collect(Collectors.toList()))
              .build());

  assertEquals(expectedResponse, asyncResponse.getValue());
}
Aggregations