Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class KafkaConsumerTest, the method verifyNoCoordinatorLookupForManualAssignmentWithSeek:
@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
        rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));
    // There should be no need to look up the coordinator or fetch committed offsets:
    // we just look up the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));
}
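For reference, TestUtils.singletonCluster(topic, 1) used above produces a Cluster with a single broker and a single partition led by that broker. Below is a minimal, hand-built sketch of roughly that shape; the topic name, host, port, and cluster id are placeholders rather than values taken from the test:

import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class SingletonClusterSketch {
    public static void main(String[] args) {
        // One broker, and one partition whose leader (and only replica) is that broker.
        Node broker = new Node(0, "localhost", 9092);
        PartitionInfo p0 = new PartitionInfo("test", 0, broker,
            new Node[]{broker}, new Node[]{broker});
        Cluster cluster = new Cluster("test-cluster",
            Collections.singletonList(broker),
            Collections.singletonList(p0),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());

        // The consumer test picks nodes().get(0) as the broker to talk to,
        // and leader lookups for the single partition resolve to the same node.
        TopicPartition tp0 = new TopicPartition("test", 0);
        System.out.println(cluster.nodes());        // the single broker node
        System.out.println(cluster.leaderFor(tp0)); // also the single broker node
    }
}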
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class AbstractCoordinatorTest, the method setupCoordinator:
@Before
public void setupCoordinator() {
    this.mockTime = new MockTime();
    this.mockClient = new MockClient(mockTime);
    Metadata metadata = new Metadata();
    this.consumerClient = new ConsumerNetworkClient(mockClient, metadata, mockTime, RETRY_BACKOFF_MS, REQUEST_TIMEOUT_MS);
    Metrics metrics = new Metrics();
    Cluster cluster = TestUtils.singletonCluster("topic", 1);
    metadata.update(cluster, Collections.<String>emptySet(), mockTime.milliseconds());
    this.node = cluster.nodes().get(0);
    mockClient.setNode(node);
    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    this.coordinator = new DummyCoordinator(consumerClient, metrics, mockTime);
}
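The coordinator node id above is not arbitrary: as far as I can tell it mirrors the consumer's own convention of deriving a synthetic id for the group coordinator connection from the broker id, so that coordinator traffic does not collide with the regular connection to the same broker. A small sketch of that mapping (the broker values are placeholders):

import org.apache.kafka.common.Node;

public class CoordinatorNodeSketch {
    // Same id mapping as in the test setup; the large offset keeps the synthetic
    // coordinator id well away from real broker ids taken from the Cluster.
    static Node coordinatorFor(Node broker) {
        return new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());
    }

    public static void main(String[] args) {
        Node broker = new Node(0, "localhost", 9092); // placeholder broker
        Node coordinator = coordinatorFor(broker);
        System.out.println(coordinator.id()); // 2147483647 for broker id 0
    }
}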
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class MetadataTest, the method testListenerCanUnregister:
@Test
public void testListenerCanUnregister() {
    long time = 0;
    final Set<String> topics = new HashSet<>();
    metadata.update(Cluster.empty(), Collections.<String>emptySet(), time);
    final Metadata.Listener listener = new Metadata.Listener() {
        @Override
        public void onMetadataUpdate(Cluster cluster, Set<String> unavailableTopics) {
            topics.clear();
            topics.addAll(cluster.topics());
        }
    };
    metadata.addListener(listener);
    metadata.update(new Cluster("cluster",
            Collections.singletonList(new Node(0, "host1", 1000)),
            Arrays.asList(new PartitionInfo("topic", 0, null, null, null),
                new PartitionInfo("topic1", 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet()),
        Collections.<String>emptySet(), 100);
    metadata.removeListener(listener);
    metadata.update(new Cluster("cluster",
            Arrays.asList(new Node(0, "host1", 1000)),
            Arrays.asList(new PartitionInfo("topic2", 0, null, null, null),
                new PartitionInfo("topic3", 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet()),
        Collections.<String>emptySet(), 100);
    assertEquals("Listener did not update topics list correctly",
        new HashSet<>(Arrays.asList("topic", "topic1")), topics);
}
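The listener only ever sees what Cluster.topics() reports, and that set is derived from the PartitionInfo entries handed to the Cluster constructor. A minimal sketch with the same topic names as the first update in the test (the host, port, and leader assignment are illustrative):

import java.util.Arrays;
import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class ClusterTopicsSketch {
    public static void main(String[] args) {
        Node node = new Node(0, "host1", 1000);
        Cluster cluster = new Cluster("cluster",
            Collections.singletonList(node),
            Arrays.asList(new PartitionInfo("topic", 0, node, null, null),
                new PartitionInfo("topic1", 0, node, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());

        // topics() is built from the partition list, so a listener that copies it
        // ends up holding exactly {"topic", "topic1"}, matching the test's assertion.
        System.out.println(cluster.topics());
    }
}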
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class MetadataTest, the method testClusterListenerGetsNotifiedOfUpdate:
@Test
public void testClusterListenerGetsNotifiedOfUpdate() {
    long time = 0;
    MockClusterResourceListener mockClusterListener = new MockClusterResourceListener();
    ClusterResourceListeners listeners = new ClusterResourceListeners();
    listeners.maybeAdd(mockClusterListener);
    metadata = new Metadata(refreshBackoffMs, metadataExpireMs, false, listeners);
    String hostName = "www.example.com";
    Cluster cluster = Cluster.bootstrap(Arrays.asList(new InetSocketAddress(hostName, 9002)));
    metadata.update(cluster, Collections.<String>emptySet(), time);
    assertFalse("ClusterResourceListener should not be called when metadata is updated with a bootstrap Cluster",
        MockClusterResourceListener.IS_ON_UPDATE_CALLED.get());
    metadata.update(new Cluster("dummy",
            Arrays.asList(new Node(0, "host1", 1000)),
            Arrays.asList(new PartitionInfo("topic", 0, null, null, null),
                new PartitionInfo("topic1", 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet()),
        Collections.<String>emptySet(), 100);
    assertEquals("MockClusterResourceListener did not get cluster metadata correctly",
        "dummy", mockClusterListener.clusterResource().clusterId());
    assertTrue("MockClusterResourceListener should be called when metadata is updated with a non-bootstrap Cluster",
        MockClusterResourceListener.IS_ON_UPDATE_CALLED.get());
}
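The distinction this test leans on is that Cluster.bootstrap(...) describes only the configured bootstrap addresses, before any metadata response has arrived, so it carries no broker-assigned cluster id; only a Cluster built from real metadata (here with id "dummy") causes the resource listeners to fire. A hedged sketch of that difference follows; the addresses are placeholders, and the null cluster id for the bootstrap case is my reading of the bootstrap path rather than something the test asserts:

import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class BootstrapClusterSketch {
    public static void main(String[] args) {
        // A bootstrap cluster knows only the configured addresses; no broker has
        // reported a cluster id yet.
        Cluster bootstrap = Cluster.bootstrap(
            Arrays.asList(new InetSocketAddress("www.example.com", 9002)));
        System.out.println(bootstrap.clusterResource().clusterId()); // expected: null

        // A cluster built from a metadata response carries the reported id.
        Node node = new Node(0, "host1", 1000);
        Cluster fromMetadata = new Cluster("dummy",
            Collections.singletonList(node),
            Collections.singletonList(new PartitionInfo("topic", 0, node, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());
        System.out.println(fromMetadata.clusterResource().clusterId()); // "dummy"
    }
}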
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class StreamPartitionAssignorTest, the method testAssignEmptyMetadata:
@Test
public void testAssignEmptyMetadata() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks10 = Utils.mkSet(task0);
    final Set<TaskId> standbyTasks10 = Utils.mkSet(task1);
    final Cluster emptyMetadata = new Cluster("cluster",
        Collections.singletonList(Node.noNode()),
        Collections.<PartitionInfo>emptySet(),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
    UUID uuid1 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, new MockClientSupplier(), "test", client1, uuid1,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid1, prevTasks10, standbyTasks10, userEndPoint).encode()));
    // initially metadata is empty
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(emptyMetadata, subscriptions);
    // check assigned partitions
    assertEquals(Collections.<TopicPartition>emptySet(), new HashSet<>(assignments.get("consumer10").partitions()));
    // check assignment info
    Set<TaskId> allActiveTasks = new HashSet<>();
    AssignmentInfo info10 = checkAssignment(Collections.<String>emptySet(), assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(0, allActiveTasks.size());
    assertEquals(Collections.<TaskId>emptySet(), new HashSet<>(allActiveTasks));
    // then metadata gets populated
    assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partitions
    assertEquals(Utils.mkSet(Utils.mkSet(t1p0, t2p0, t1p0, t2p0, t1p1, t2p1, t1p2, t2p2)),
        Utils.mkSet(new HashSet<>(assignments.get("consumer10").partitions())));
    // the first consumer
    info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}
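The emptyMetadata value above shows the pattern for representing "no metadata yet" to an assignor: a Cluster containing only Node.noNode() and no PartitionInfo entries, so Cluster.topics() is empty and there is nothing to assign. A minimal sketch of that shape (the cluster id is a placeholder):

import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class EmptyMetadataSketch {
    public static void main(String[] args) {
        // No partitions at all: topics() comes back empty, which is why the first
        // assignment in the test yields no partitions and no active tasks.
        Cluster emptyMetadata = new Cluster("cluster",
            Collections.singletonList(Node.noNode()),
            Collections.<PartitionInfo>emptySet(),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());
        System.out.println(emptyMetadata.topics().isEmpty()); // true
    }
}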