Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class KafkaProducerTest, method testMetadataFetchOnStaleMetadata:
@PrepareOnlyThisForTest(Metadata.class)
@Test
public void testMetadataFetchOnStaleMetadata() throws Exception {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
    Metadata metadata = PowerMock.createNiceMock(Metadata.class);
    MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata);

    String topic = "topic";
    ProducerRecord<String, String> initialRecord = new ProducerRecord<>(topic, "value");
    // Create a record with a partition higher than the initial (outdated) partition range
    ProducerRecord<String, String> extendedRecord = new ProducerRecord<>(topic, 2, null, "value");

    Collection<Node> nodes = Collections.singletonList(new Node(0, "host1", 1000));
    final Cluster emptyCluster = new Cluster(null, nodes,
            Collections.<PartitionInfo>emptySet(),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());
    final Cluster initialCluster = new Cluster("dummy", Collections.singletonList(new Node(0, "host1", 1000)),
            Arrays.asList(new PartitionInfo(topic, 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());
    final Cluster extendedCluster = new Cluster("dummy", Collections.singletonList(new Node(0, "host1", 1000)),
            Arrays.asList(
                    new PartitionInfo(topic, 0, null, null, null),
                    new PartitionInfo(topic, 1, null, null, null),
                    new PartitionInfo(topic, 2, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());

    // Expect exactly one fetch for each attempt to refresh while topic metadata is not available
    final int refreshAttempts = 5;
    EasyMock.expect(metadata.fetch()).andReturn(emptyCluster).times(refreshAttempts - 1);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(initialRecord);
    PowerMock.verify(metadata);

    // Expect exactly one fetch if topic metadata is available and the record is still within the known partition range
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(initialRecord, null);
    PowerMock.verify(metadata);

    // Expect exactly two fetches if topic metadata is available but the refreshed metadata still reports
    // the same partition count (either because the metadata is also stale at the broker, or because
    // no partitions were added in the first place)
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    try {
        producer.send(extendedRecord, null);
        fail("Expected KafkaException to be raised");
    } catch (KafkaException e) {
        // expected
    }
    PowerMock.verify(metadata);

    // Expect exactly two fetches if topic metadata is available but outdated for the given record
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andReturn(extendedCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(extendedRecord, null);
    PowerMock.verify(metadata);
}
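The test swaps in Cluster instances with different partition counts to drive the producer's metadata refresh logic. For readers unfamiliar with the Cluster API, here is a minimal standalone sketch (not part of the test; the topic name, node, and partition counts are arbitrary) of how a Cluster with partial metadata reveals that a requested partition is out of range:

import java.util.Arrays;
import java.util.Collections;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionRangeSketch {
    public static void main(String[] args) {
        Node node = new Node(0, "host1", 1000);
        // Cluster metadata that only knows about partition 0 of "topic"
        Cluster initialCluster = new Cluster("dummy",
                Collections.singletonList(node),
                Arrays.asList(new PartitionInfo("topic", 0, null, null, null)),
                Collections.<String>emptySet(),
                Collections.<String>emptySet());
        int requestedPartition = 2;
        Integer knownPartitions = initialCluster.partitionCountForTopic("topic");
        // Partition 2 lies outside the known range; this is the condition that
        // forces the producer above to refresh metadata (the second fetch)
        if (knownPartitions == null || requestedPartition >= knownPartitions) {
            System.out.println("Partition " + requestedPartition + " not in metadata yet");
        }
    }
}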
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class KafkaConsumerTest, method testCommitsFetchedDuringAssign:
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;
    int rebalanceTimeoutMs = 6000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(singletonList(tp0));

    // lookup coordinator
    client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    consumer.assign(Arrays.asList(tp0, tp1));

    // fetch offsets for two topics
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());
    assertEquals(offset2, consumer.committed(tp1).offset());
}
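Outside the mock framework, the behavior under test corresponds to one coordinator round trip per committed() call after a manual assign(). A hedged usage sketch against a real broker, assuming the usual org.apache.kafka.clients.consumer imports (the bootstrap address, group id, and topic are placeholders):

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    TopicPartition tp = new TopicPartition("topic", 0);
    consumer.assign(Collections.singletonList(tp));
    // committed() triggers the coordinator lookup and the offset fetch on demand
    OffsetAndMetadata committed = consumer.committed(tp);
    System.out.println(committed == null ? "no committed offset" : "offset " + committed.offset());
}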
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class KafkaConsumerTest, method testAutoCommitSentBeforePositionUpdate:
@Test
public void testAutoCommitSentBeforePositionUpdate() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    // set the auto-commit interval lower than the heartbeat interval so we don't
    // need to deal with a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    consumer.poll(0);

    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());
    time.sleep(autoCommitIntervalMs);

    client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node);

    // no data has been returned to the user yet, so the committed offset should be 0
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 0);
    consumer.poll(0);
    assertTrue(commitReceived.get());
}
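The timing the test relies on maps to ordinary consumer configuration: auto-commit must be enabled, and its interval must elapse before the poll() that carries the commit. A minimal sketch of the relevant properties, with values mirroring the test rather than recommended settings:

Properties props = new Properties();
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
// Commit interval below the heartbeat interval, as in the test, so an
// auto-commit falls due on the next poll() without a heartbeat interleaving
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000");
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");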
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class KafkaConsumerTest, method verifyNoCoordinatorLookupForManualAssignmentWithSeek:
@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;

    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);

    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));

    // there shouldn't be any need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));

    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));
}
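The takeaway for application code: manual assignment combined with an explicit seek produces no coordinator traffic at all, only a list-offsets request followed by record fetches. A hedged sketch of the same pattern, reusing a consumer configured as in the earlier sketch ("topic" and partition 0 are placeholders):

TopicPartition tp = new TopicPartition("topic", 0);
consumer.assign(Collections.singletonList(tp));
consumer.seekToBeginning(Collections.singletonList(tp));
// Only list-offsets and fetch requests go out; no coordinator lookup, no offset fetch
ConsumerRecords<String, String> records = consumer.poll(100);
System.out.println("fetched " + records.count() + " records, position now " + consumer.position(tp));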
Use of org.apache.kafka.common.Cluster in project kafka by apache.
From the class AbstractCoordinatorTest, method setupCoordinator:
@Before
public void setupCoordinator() {
    this.mockTime = new MockTime();
    this.mockClient = new MockClient(mockTime);

    Metadata metadata = new Metadata();
    this.consumerClient = new ConsumerNetworkClient(mockClient, metadata, mockTime, RETRY_BACKOFF_MS, REQUEST_TIMEOUT_MS);
    Metrics metrics = new Metrics();

    Cluster cluster = TestUtils.singletonCluster("topic", 1);
    metadata.update(cluster, Collections.<String>emptySet(), mockTime.milliseconds());
    this.node = cluster.nodes().get(0);
    mockClient.setNode(node);

    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    this.coordinator = new DummyCoordinator(consumerClient, metrics, mockTime);
}
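Note the connection id chosen for the coordinator, Integer.MAX_VALUE - node.id(), the same expression the consumer test above uses. This appears to mirror the client's internal convention in this version: the coordinator shares the broker's host and port but gets an id from a disjoint range, so the network layer maintains a separate connection for coordinator traffic. A small illustrative sketch:

Node node = new Node(0, "host1", 1000);
// Same host and port as the broker, but an id from a disjoint range, so the
// underlying network client keeps a separate connection for coordinator requests
Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
System.out.println("broker id: " + node.id() + ", coordinator connection id: " + coordinator.id());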