Usage of io.confluent.rest.annotations.PerformanceMetric in the project kafka-rest by confluentinc: the `list` method of the `TopicsResource` class.
@GET
@PerformanceMetric("topics.list+v2")
@ResourceName("api.v2.topics.list")
public void list(@Suspended AsyncResponse asyncResponse) {
  // Resolve the manager lazily, once per request.
  TopicManager manager = topicManagerProvider.get();

  // Fetch the locally known topics and project each one down to its name.
  CompletableFuture<List<String>> topicNames =
      manager
          .listLocalTopics()
          .thenApply(
              topics -> topics.stream().map(Topic::getName).collect(Collectors.toList()));

  // Complete the suspended JAX-RS response when the future finishes.
  AsyncResponses.asyncResume(asyncResponse, topicNames);
}
Usage of io.confluent.rest.annotations.PerformanceMetric in the project kafka-rest by confluentinc: the `produce` method of the `ProduceAction` class.
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@PerformanceMetric("v3.produce.produce-to-topic")
@ResourceName("api.v3.produce.produce-to-topic")
public void produce(
    @Suspended AsyncResponse asyncResponse,
    @PathParam("clusterId") String clusterId,
    @PathParam("topicName") String topicName,
    MappingIterator<ProduceRequest> requests)
    throws Exception {
  // A missing request body is a client error, not an empty record stream.
  if (requests == null) {
    throw Errors.invalidPayloadException("Null input provided. Data is required.");
  }

  ProduceController producer = produceControllerProvider.get();

  // Stream each incoming ProduceRequest through the per-record produce
  // overload and resume the suspended response as results become available.
  streamingResponseFactory
      .from(requests)
      .compose(request -> produce(clusterId, topicName, request, producer))
      .resume(asyncResponse);
}
Usage of io.confluent.rest.annotations.PerformanceMetric in the project kafka-rest by confluentinc: the `listBrokersConfigs` method of the `ListAllBrokersConfigsAction` class.
@GET
@Produces(MediaType.APPLICATION_JSON)
@PerformanceMetric("v3.brokers.configs.list")
@ResourceName("api.v3.brokers-configs.list")
public void listBrokersConfigs(
    @Suspended AsyncResponse asyncResponse, @PathParam("clusterId") String clusterId) {
  BrokerConfigManager configManager = brokerConfigManager.get();

  // First list the cluster's brokers, then fetch every broker's configs in a
  // single call and shape the result into the v3 response envelope.
  CompletableFuture<ListBrokerConfigsResponse> response =
      brokerManager
          .get()
          .listBrokers(clusterId)
          .thenCompose(
              brokers ->
                  configManager
                      .listAllBrokerConfigs(
                          clusterId,
                          brokers.stream()
                              .map(Broker::getBrokerId)
                              .collect(Collectors.toList()))
                      .thenApply(
                          configs -> {
                            // Flatten the per-broker config lists, keeping each
                            // broker's configs ordered by broker id.
                            BrokerConfigDataList dataList =
                                BrokerConfigDataList.builder()
                                    .setMetadata(
                                        ResourceCollection.Metadata.builder()
                                            .setSelf(
                                                urlFactory.create(
                                                    "v3",
                                                    "clusters",
                                                    clusterId,
                                                    "brokers",
                                                    "-",
                                                    "configs"))
                                            .build())
                                    .setData(
                                        configs.values().stream()
                                            .flatMap(
                                                brokerConfigs ->
                                                    brokerConfigs.stream()
                                                        .sorted(
                                                            Comparator.comparing(
                                                                BrokerConfig::getBrokerId)))
                                            .map(
                                                config ->
                                                    BrokerConfigsResource.toBrokerConfigData(
                                                        config, crnFactory, urlFactory))
                                            .collect(Collectors.toList()))
                                    .build();
                            return ListBrokerConfigsResponse.create(dataList);
                          }));

  AsyncResponses.asyncResume(asyncResponse, response);
}
Usage of io.confluent.rest.annotations.PerformanceMetric in the project kafka-rest by confluentinc: the `listConsumerLags` method of the `ConsumerLagsResource` class.
@GET
@Produces(MediaType.APPLICATION_JSON)
@PerformanceMetric("v3.consumer-lags.list")
@ResourceName("api.v3.consumer-lags.list")
public void listConsumerLags(
    @Suspended AsyncResponse asyncResponse,
    @PathParam("clusterId") String clusterId,
    @PathParam("consumerGroupId") String consumerGroupId) {
  CompletableFuture<ListConsumerLagsResponse> response =
      consumerLagManager
          .get()
          .listConsumerLags(clusterId, consumerGroupId)
          .thenApply(
              lags -> {
                // No lag entries at all is surfaced as a 404 rather than an
                // empty list.
                if (lags.isEmpty()) {
                  throw new NotFoundException("Consumer lags not found.");
                }
                // Order entries by largest lag first; break ties by topic
                // name, then by partition id.
                return ListConsumerLagsResponse.create(
                    ConsumerLagDataList.builder()
                        .setMetadata(
                            ResourceCollection.Metadata.builder()
                                .setSelf(
                                    urlFactory.create(
                                        "v3",
                                        "clusters",
                                        clusterId,
                                        "consumer-groups",
                                        consumerGroupId,
                                        "lags"))
                                .build())
                        .setData(
                            lags.stream()
                                .map(this::toConsumerLagData)
                                .sorted(
                                    Comparator.comparing(ConsumerLagData::getLag)
                                        .reversed()
                                        .thenComparing(ConsumerLagData::getTopicName)
                                        .thenComparing(ConsumerLagData::getPartitionId))
                                .collect(Collectors.toList()))
                        .build());
              });

  AsyncResponses.asyncResume(asyncResponse, response);
}
Usage of io.confluent.rest.annotations.PerformanceMetric in the project schema-registry by confluentinc: the `getSchemas` method of the `SchemasResource` class.
/**
 * Returns the schemas whose subject starts with {@code subjectPrefix}, applying
 * offset/limit pagination to the iterator returned by the registry.
 *
 * @param subjectPrefix filter: only subjects with this prefix are returned
 * @param lookupDeletedSchema whether soft-deleted schemas are included
 * @param latestOnly whether only the latest version per matching subject is returned
 * @param offset number of leading results to skip
 * @param limit maximum number of results; values <= 0 mean unlimited
 * @throws RestException wrapping store or registry failures (50001 on store errors)
 */
@GET
@Operation(summary = "Get the schemas.", responses = { @ApiResponse(responseCode = "500", description = "Error code 50001 -- Error in the backend " + "data store\n") })
@PerformanceMetric("schemas.get-schemas")
public List<Schema> getSchemas(
    @Parameter(description = "Filters results by the respective subject prefix") @DefaultValue("") @QueryParam("subjectPrefix") String subjectPrefix,
    @Parameter(description = "Whether to return soft deleted schemas") @DefaultValue("false") @QueryParam("deleted") boolean lookupDeletedSchema,
    @Parameter(description = "Whether to return latest schema versions only for each matching subject") @DefaultValue("false") @QueryParam("latestOnly") boolean latestOnly,
    @Parameter(description = "Pagination offset for results") @DefaultValue("0") @QueryParam("offset") int offset,
    @Parameter(description = "Pagination size for results. Ignored if negative") @DefaultValue("-1") @QueryParam("limit") int limit) {
  Iterator<Schema> schemas;
  List<Schema> filteredSchemas = new ArrayList<>();
  String errorMessage = "Error while getting schemas for prefix " + subjectPrefix;
  try {
    schemas = schemaRegistry.getVersionsWithSubjectPrefix(subjectPrefix, lookupDeletedSchema, latestOnly);
  } catch (SchemaRegistryStoreException e) {
    throw Errors.storeException(errorMessage, e);
  } catch (SchemaRegistryException e) {
    throw Errors.schemaRegistryException(errorMessage, e);
  }
  // Exclusive upper bound of the requested page. Saturate the addition in
  // long arithmetic: the previous `offset + limit` could overflow int for
  // large query values, yielding a negative bound and an always-empty result.
  int toIndex =
      limit > 0 ? (int) Math.min((long) offset + (long) limit, Integer.MAX_VALUE) : Integer.MAX_VALUE;
  int index = 0;
  // Walk the iterator once, skipping the first `offset` entries and stopping
  // as soon as the page is full. (Removed the unused local `fromIndex`.)
  while (schemas.hasNext() && index < toIndex) {
    Schema schema = schemas.next();
    if (index >= offset) {
      filteredSchemas.add(schema);
    }
    index++;
  }
  return filteredSchemas;
}
Aggregations