Use of org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse in the apache/kafka project.
From the class KafkaAdminClientTest, method testIncrementalAlterConfigs:
@Test
public void testIncrementalAlterConfigs() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // test error scenarios
        IncrementalAlterConfigsResponseData responseData = new IncrementalAlterConfigsResponseData();
        responseData.responses().add(new AlterConfigsResourceResponse()
            .setResourceName("")
            .setResourceType(ConfigResource.Type.BROKER.id())
            .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())
            .setErrorMessage("authorization error"));
        responseData.responses().add(new AlterConfigsResourceResponse()
            .setResourceName("topic1")
            .setResourceType(ConfigResource.Type.TOPIC.id())
            .setErrorCode(Errors.INVALID_REQUEST.code())
            .setErrorMessage("Config value append is not allowed for config"));
        env.kafkaClient().prepareResponse(new IncrementalAlterConfigsResponse(responseData));

        ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "");
        ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, "topic1");
        AlterConfigOp alterConfigOp1 = new AlterConfigOp(new ConfigEntry("log.segment.bytes", "1073741"), AlterConfigOp.OpType.SET);
        AlterConfigOp alterConfigOp2 = new AlterConfigOp(new ConfigEntry("compression.type", "gzip"), AlterConfigOp.OpType.APPEND);

        final Map<ConfigResource, Collection<AlterConfigOp>> configs = new HashMap<>();
        configs.put(brokerResource, singletonList(alterConfigOp1));
        configs.put(topicResource, singletonList(alterConfigOp2));

        AlterConfigsResult result = env.adminClient().incrementalAlterConfigs(configs);
        TestUtils.assertFutureError(result.values().get(brokerResource), ClusterAuthorizationException.class);
        TestUtils.assertFutureError(result.values().get(topicResource), InvalidRequestException.class);

        // Test a call where there are no errors.
        responseData = new IncrementalAlterConfigsResponseData();
        responseData.responses().add(new AlterConfigsResourceResponse()
            .setResourceName("")
            .setResourceType(ConfigResource.Type.BROKER.id())
            .setErrorCode(Errors.NONE.code())
            .setErrorMessage(ApiError.NONE.message()));
        env.kafkaClient().prepareResponse(new IncrementalAlterConfigsResponse(responseData));
        env.adminClient().incrementalAlterConfigs(Collections.singletonMap(brokerResource, singletonList(alterConfigOp1))).all().get();
    }
}
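Outside the mocked test environment, the same request shape goes through the public Admin API. The following is a minimal sketch under stated assumptions: the bootstrap address, topic name, and config value are illustrative and not taken from the test above.

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Illustrative bootstrap address; replace with a reachable broker.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // Target a topic-level config; an empty resource name on a BROKER resource
            // would instead address the cluster-wide broker default, as in the test above.
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "topic1");
            AlterConfigOp setRetention =
                new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET);

            Map<ConfigResource, Collection<AlterConfigOp>> configs =
                Collections.singletonMap(topic, Collections.singletonList(setRetention));

            // all() completes only if every per-resource future succeeds; per-resource
            // failures surface as an ExecutionException wrapping the Kafka exception.
            admin.incrementalAlterConfigs(configs).all().get();
        }
    }
}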
Use of org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse in the apache/kafka project.
From the class IncrementalAlterConfigsRequest, method getErrorResponse:
@Override
public AbstractResponse getErrorResponse(final int throttleTimeMs, final Throwable e) {
    IncrementalAlterConfigsResponseData response = new IncrementalAlterConfigsResponseData();
    ApiError apiError = ApiError.fromThrowable(e);
    for (AlterConfigsResource resource : data.resources()) {
        response.responses().add(new AlterConfigsResourceResponse()
            .setResourceName(resource.resourceName())
            .setResourceType(resource.resourceType())
            .setErrorCode(apiError.error().code())
            .setErrorMessage(apiError.message()));
    }
    return new IncrementalAlterConfigsResponse(response);
}
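getErrorResponse answers every resource in the original request with the same ApiError derived from the throwable. Below is a minimal sketch of how those per-resource entries can be read back; the response constructed here is hypothetical (it mirrors the shape getErrorResponse produces for a single TOPIC resource), while Errors.forCode and the generated accessors on AlterConfigsResourceResponse are existing Kafka classes.

import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse;
import org.apache.kafka.common.protocol.Errors;

public class ErrorResponseInspection {
    public static void main(String[] args) {
        // Build a response the way getErrorResponse would for a single TOPIC resource.
        IncrementalAlterConfigsResponseData data = new IncrementalAlterConfigsResponseData();
        data.responses().add(new AlterConfigsResourceResponse()
            .setResourceName("topic1")
            .setResourceType(ConfigResource.Type.TOPIC.id())
            .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())
            .setErrorMessage("authorization error"));

        // Every entry carries its own error code; map it back to the Errors enum to inspect it.
        for (AlterConfigsResourceResponse r : data.responses()) {
            Errors error = Errors.forCode(r.errorCode());
            System.out.println(r.resourceName() + " -> " + error + ": " + r.errorMessage());
        }
    }
}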
Use of org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse in the apache/kafka project.
From the class RequestResponseTest, method createIncrementalAlterConfigsResponse:
private IncrementalAlterConfigsResponse createIncrementalAlterConfigsResponse() {
    IncrementalAlterConfigsResponseData data = new IncrementalAlterConfigsResponseData();
    data.responses().add(new AlterConfigsResourceResponse()
        .setResourceName("testtopic")
        .setResourceType(ResourceType.TOPIC.code())
        .setErrorCode(Errors.NONE.code())
        .setErrorMessage("Duplicate Keys"));
    return new IncrementalAlterConfigsResponse(data);
}
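As a hedged illustration of how such a hand-built response might be inspected: the snippet below is not part of RequestResponseTest, and it assumes the factory method above is in scope; errorCounts() is defined on Kafka's AbstractResponse and tallies the per-resource error codes.

import java.util.Map;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.IncrementalAlterConfigsResponse;

// Assuming createIncrementalAlterConfigsResponse() from above is in scope.
IncrementalAlterConfigsResponse response = createIncrementalAlterConfigsResponse();

// errorCounts() counts the error codes of all per-resource entries; with one
// resource answered with Errors.NONE, the map is expected to contain NONE -> 1.
Map<Errors, Integer> counts = response.errorCounts();
System.out.println(counts);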