Usage of io.confluent.kafkarest.entities.v3.ListReplicasResponse in the kafka-rest project by Confluent.
From the class ReplicasResourceIntegrationTest, method listReplicas_existingPartition_returnsReplicas.
@Test
public void listReplicas_existingPartition_returnsReplicas() {
  String baseUrl = restConnect;
  String clusterId = getClusterId();

  // Collection URL for the replicas of partition 0 of the test topic.
  String replicasUrl =
      baseUrl
          + "/v3/clusters/"
          + clusterId
          + "/topics/"
          + TOPIC_NAME
          + "/partitions/0/replicas";

  // The topic has exactly one replica, hosted on broker 0, which is therefore
  // both the leader and in-sync.
  ReplicaData replica =
      ReplicaData.builder()
          .setMetadata(
              Resource.Metadata.builder()
                  .setSelf(replicasUrl + "/0")
                  .setResourceName(
                      "crn:///kafka=" + clusterId + "/topic=" + TOPIC_NAME
                          + "/partition=0/replica=0")
                  .build())
          .setClusterId(clusterId)
          .setTopicName(TOPIC_NAME)
          .setPartitionId(0)
          .setBrokerId(0)
          .setLeader(true)
          .setInSync(true)
          .setBroker(
              Resource.Relationship.create(
                  baseUrl + "/v3/clusters/" + clusterId + "/brokers/0"))
          .build();

  ListReplicasResponse expected =
      ListReplicasResponse.create(
          ReplicaDataList.builder()
              .setMetadata(ResourceCollection.Metadata.builder().setSelf(replicasUrl).build())
              .setData(singletonList(replica))
              .build());

  // Hit the live REST endpoint and verify both the HTTP status and the payload.
  Response response =
      request("/v3/clusters/" + clusterId + "/topics/" + TOPIC_NAME + "/partitions/0/replicas")
          .accept(MediaType.APPLICATION_JSON)
          .get();
  assertEquals(Status.OK.getStatusCode(), response.getStatus());

  ListReplicasResponse actual = response.readEntity(ListReplicasResponse.class);
  assertEquals(expected, actual);
}
Usage of io.confluent.kafkarest.entities.v3.ListReplicasResponse in the kafka-rest project by Confluent.
From the class ReplicasResourceTest, method listReplicas_existingPartition_returnsReplicas.
@Test
public void listReplicas_existingPartition_returnsReplicas() {
  // Stub the manager to return three replicas: 1 is the in-sync leader,
  // 2 an in-sync follower, and 3 an out-of-sync follower.
  expect(replicaManager.listReplicas(CLUSTER_ID, TOPIC_NAME, PARTITION_ID))
      .andReturn(completedFuture(Arrays.asList(REPLICA_1, REPLICA_2, REPLICA_3)));
  replay(replicaManager);

  FakeAsyncResponse response = new FakeAsyncResponse();
  replicasResource.listReplicas(response, CLUSTER_ID, TOPIC_NAME, PARTITION_ID);

  ReplicaData replica1 =
      ReplicaData.builder()
          .setMetadata(
              Resource.Metadata.builder()
                  .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/0" + "/replicas/1")
                  .setResourceName(
                      "crn:///kafka=cluster-1/topic=topic-1/partition=0" + "/replica=1")
                  .build())
          .setClusterId(CLUSTER_ID)
          .setTopicName(TOPIC_NAME)
          .setPartitionId(PARTITION_ID)
          .setBrokerId(REPLICA_1.getBrokerId())
          .setLeader(true)
          .setInSync(true)
          .setBroker(Resource.Relationship.create("/v3/clusters/cluster-1/brokers/1"))
          .build();
  ReplicaData replica2 =
      ReplicaData.builder()
          .setMetadata(
              Resource.Metadata.builder()
                  .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/0" + "/replicas/2")
                  .setResourceName(
                      "crn:///kafka=cluster-1/topic=topic-1/partition=0" + "/replica=2")
                  .build())
          .setClusterId(CLUSTER_ID)
          .setTopicName(TOPIC_NAME)
          .setPartitionId(PARTITION_ID)
          .setBrokerId(REPLICA_2.getBrokerId())
          .setLeader(false)
          .setInSync(true)
          .setBroker(Resource.Relationship.create("/v3/clusters/cluster-1/brokers/2"))
          .build();
  ReplicaData replica3 =
      ReplicaData.builder()
          .setMetadata(
              Resource.Metadata.builder()
                  .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/0" + "/replicas/3")
                  .setResourceName(
                      "crn:///kafka=cluster-1/topic=topic-1/partition=0" + "/replica=3")
                  .build())
          .setClusterId(CLUSTER_ID)
          .setTopicName(TOPIC_NAME)
          .setPartitionId(PARTITION_ID)
          .setBrokerId(REPLICA_3.getBrokerId())
          .setLeader(false)
          .setInSync(false)
          .setBroker(Resource.Relationship.create("/v3/clusters/cluster-1/brokers/3"))
          .build();

  ListReplicasResponse expected =
      ListReplicasResponse.create(
          ReplicaDataList.builder()
              .setMetadata(
                  ResourceCollection.Metadata.builder()
                      .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/0/replicas")
                      .build())
              .setData(Arrays.asList(replica1, replica2, replica3))
              .build());

  assertEquals(expected, response.getValue());
}
Aggregations