Use of org.apache.kafka.common.Cluster in project kafka by apache.
The class MetadataTest, method testListenerGetsNotifiedOfUpdate.
@Test
public void testListenerGetsNotifiedOfUpdate() {
    long time = 0;
    final Set<String> topics = new HashSet<>();
    metadata.update(Cluster.empty(), Collections.<String>emptySet(), time);
    metadata.addListener(new Metadata.Listener() {
        @Override
        public void onMetadataUpdate(Cluster cluster, Set<String> unavailableTopics) {
            topics.clear();
            topics.addAll(cluster.topics());
        }
    });
    metadata.update(new Cluster(null,
            Arrays.asList(new Node(0, "host1", 1000)),
            Arrays.asList(new PartitionInfo("topic", 0, null, null, null),
                    new PartitionInfo("topic1", 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet()),
        Collections.<String>emptySet(), 100);
    assertEquals("Listener did not update topics list correctly",
        new HashSet<>(Arrays.asList("topic", "topic1")), topics);
}
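The Cluster handed to the listener can also be inspected directly. A minimal sketch of the common accessors, assuming the same two-topic cluster built in the test above:

// Illustrative only: querying the Cluster constructed in the test.
Cluster cluster = new Cluster(null,
        Arrays.asList(new Node(0, "host1", 1000)),
        Arrays.asList(new PartitionInfo("topic", 0, null, null, null),
                new PartitionInfo("topic1", 0, null, null, null)),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
cluster.topics();                        // {"topic", "topic1"}
cluster.partitionCountForTopic("topic"); // 1
cluster.nodes();                         // the single node host1:1000 (id 0)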
Use of org.apache.kafka.common.Cluster in project kafka by apache.
The class KafkaProducerTest, method testMetadataFetch.
@PrepareOnlyThisForTest(Metadata.class)
@Test
public void testMetadataFetch() throws Exception {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
    Metadata metadata = PowerMock.createNiceMock(Metadata.class);
    MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata);

    String topic = "topic";
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value");
    Collection<Node> nodes = Collections.singletonList(new Node(0, "host1", 1000));
    final Cluster emptyCluster = new Cluster(null, nodes,
        Collections.<PartitionInfo>emptySet(),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
    final Cluster cluster = new Cluster("dummy",
        Collections.singletonList(new Node(0, "host1", 1000)),
        Arrays.asList(new PartitionInfo(topic, 0, null, null, null)),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());

    // Expect exactly one fetch for each attempt to refresh while topic metadata is not available
    final int refreshAttempts = 5;
    EasyMock.expect(metadata.fetch()).andReturn(emptyCluster).times(refreshAttempts - 1);
    EasyMock.expect(metadata.fetch()).andReturn(cluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(record);
    PowerMock.verify(metadata);

    // Expect exactly one fetch if topic metadata is available
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(cluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(record, null);
    PowerMock.verify(metadata);

    // Expect exactly one fetch if topic metadata is available
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(cluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.partitionsFor(topic);
    PowerMock.verify(metadata);
}
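The expect-once-then-fail sequence repeats verbatim three times; it could be factored into a helper. A minimal sketch, where expectSingleFetch is a hypothetical name and not part of the original test:

// Hypothetical helper: stub metadata.fetch() to succeed exactly once,
// then fail loudly if the code under test fetches again.
private static void expectSingleFetch(Metadata metadata, Cluster cluster) {
    EasyMock.expect(metadata.fetch()).andReturn(cluster).once();
    EasyMock.expect(metadata.fetch())
            .andThrow(new IllegalStateException("Unexpected call to metadata.fetch()"))
            .anyTimes();
}

The trailing andThrow(...).anyTimes() expectation acts as a guard: EasyMock would otherwise tolerate extra calls on a nice mock, so any fetch beyond the expected count fails the test immediately.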
Use of org.apache.kafka.common.Cluster in project kafka by apache.
The class StreamPartitionAssignorTest, method shouldSetClusterMetadataOnAssignment.
@Test
public void shouldSetClusterMetadataOnAssignment() throws Exception {
    final List<TopicPartition> topic = Collections.singletonList(new TopicPartition("topic", 0));
    final Map<HostInfo, Set<TopicPartition>> hostState = Collections.singletonMap(
        new HostInfo("localhost", 80),
        Collections.singleton(new TopicPartition("topic", 0)));
    final AssignmentInfo assignmentInfo = new AssignmentInfo(
        Collections.singletonList(new TaskId(0, 0)),
        Collections.<TaskId, Set<TopicPartition>>emptyMap(),
        hostState);

    partitionAssignor.onAssignment(new PartitionAssignor.Assignment(topic, assignmentInfo.encode()));

    final Cluster cluster = partitionAssignor.clusterMetadata();
    final List<PartitionInfo> partitionInfos = cluster.partitionsForTopic("topic");
    assertEquals(1, partitionInfos.size());
    final PartitionInfo partitionInfo = partitionInfos.get(0);
    assertEquals("topic", partitionInfo.topic());
    assertEquals(0, partitionInfo.partition());
}
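The host-to-partition mapping decoded here is what backs the Kafka Streams metadata API. A hedged sketch of how an application might read the equivalent information through the public API, assuming a started KafkaStreams instance named streams:

// Assumption: `streams` is a started org.apache.kafka.streams.KafkaStreams instance.
for (StreamsMetadata streamsMetadata : streams.allMetadata()) {
    HostInfo host = streamsMetadata.hostInfo();
    System.out.println(host.host() + ":" + host.port()
        + " -> " + streamsMetadata.topicPartitions());
}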
Use of org.apache.kafka.common.Cluster in project kafka by apache.
The class KafkaProducer, method waitOnMetadata.
/**
 * Wait for cluster metadata including partitions for the given topic to be available.
 * @param topic The topic we want metadata for
 * @param partition A specific partition expected to exist in metadata, or null if there's no preference
 * @param maxWaitMs The maximum time in ms for waiting on the metadata
 * @return The cluster containing topic metadata and the amount of time we waited in ms
 */
private ClusterAndWaitTime waitOnMetadata(String topic, Integer partition, long maxWaitMs) throws InterruptedException {
    // add topic to metadata topic list if it is not there already and reset expiry
    metadata.add(topic);
    Cluster cluster = metadata.fetch();
    Integer partitionsCount = cluster.partitionCountForTopic(topic);
    // Return cached metadata if we have it, and if the record's partition is either undefined
    // or within the known partition range
    if (partitionsCount != null && (partition == null || partition < partitionsCount))
        return new ClusterAndWaitTime(cluster, 0);

    long begin = time.milliseconds();
    long remainingWaitMs = maxWaitMs;
    long elapsed;
    // Issue metadata requests until we have metadata for the topic or maxWaitMs is exceeded.
    // In case we already have cached metadata for the topic, but the requested partition is
    // greater than expected, issue an update request only once. This is necessary in case the metadata
    // is stale and the number of partitions for this topic has increased in the meantime.
    do {
        log.trace("Requesting metadata update for topic {}.", topic);
        int version = metadata.requestUpdate();
        sender.wakeup();
        try {
            metadata.awaitUpdate(version, remainingWaitMs);
        } catch (TimeoutException ex) {
            // Rethrow with original maxWaitMs to prevent logging exception with remainingWaitMs
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        }
        cluster = metadata.fetch();
        elapsed = time.milliseconds() - begin;
        if (elapsed >= maxWaitMs)
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        if (cluster.unauthorizedTopics().contains(topic))
            throw new TopicAuthorizationException(topic);
        remainingWaitMs = maxWaitMs - elapsed;
        partitionsCount = cluster.partitionCountForTopic(topic);
    } while (partitionsCount == null);

    if (partition != null && partition >= partitionsCount) {
        throw new KafkaException(String.format(
            "Invalid partition given with record: %d is not in the range [0...%d).", partition, partitionsCount));
    }
    return new ClusterAndWaitTime(cluster, elapsed);
}
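From the caller's side, maxWaitMs comes from the producer's max.block.ms setting, so the wait above can be bounded in configuration. A minimal sketch (broker address and topic name are placeholders):

// Illustrative only: bound how long send()/partitionsFor() may block on metadata.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "5000"); // waitOnMetadata gives up after 5 s
KafkaProducer<String, String> producer =
    new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
producer.partitionsFor("my-topic"); // throws TimeoutException if metadata does not arrive in time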
Use of org.apache.kafka.common.Cluster in project kafka by apache.
The class Sender, method run.
/**
 * Run a single iteration of sending
 *
 * @param now The current POSIX time in milliseconds
 */
void run(long now) {
    Cluster cluster = metadata.fetch();
    // get the list of partitions with data ready to send
    RecordAccumulator.ReadyCheckResult result = this.accumulator.ready(cluster, now);

    // if there are any partitions whose leaders are not known yet, force metadata update
    if (!result.unknownLeaderTopics.isEmpty()) {
        // The set of topics with unknown leader contains topics with leader election pending as well as
        // topics which may have expired. Add the topic again to metadata to ensure it is included,
        // and request metadata update, since there are messages to send to the topic.
        for (String topic : result.unknownLeaderTopics)
            this.metadata.add(topic);
        this.metadata.requestUpdate();
    }

    // remove any nodes we aren't ready to send to
    Iterator<Node> iter = result.readyNodes.iterator();
    long notReadyTimeout = Long.MAX_VALUE;
    while (iter.hasNext()) {
        Node node = iter.next();
        if (!this.client.ready(node, now)) {
            iter.remove();
            notReadyTimeout = Math.min(notReadyTimeout, this.client.connectionDelay(node, now));
        }
    }

    // create produce requests
    Map<Integer, List<ProducerBatch>> batches = this.accumulator.drain(cluster, result.readyNodes, this.maxRequestSize, now);
    if (guaranteeMessageOrder) {
        // Mute all the partitions drained
        for (List<ProducerBatch> batchList : batches.values()) {
            for (ProducerBatch batch : batchList)
                this.accumulator.mutePartition(batch.topicPartition);
        }
    }

    List<ProducerBatch> expiredBatches = this.accumulator.abortExpiredBatches(this.requestTimeout, now);
    // update sensors
    for (ProducerBatch expiredBatch : expiredBatches)
        this.sensors.recordErrors(expiredBatch.topicPartition.topic(), expiredBatch.recordCount);
    sensors.updateProduceRequestMetrics(batches);

    // If we have any nodes that are ready to send + have sendable data, poll with 0 timeout so this can immediately
    // loop and try sending more data. Otherwise, the timeout is determined by nodes that have partitions with data
    // that isn't yet sendable (e.g. lingering, backing off). Note that this specifically does not include nodes
    // with sendable data that aren't ready to send since they would cause busy looping.
    long pollTimeout = Math.min(result.nextReadyCheckDelayMs, notReadyTimeout);
    if (!result.readyNodes.isEmpty()) {
        log.trace("Nodes with data ready to send: {}", result.readyNodes);
        pollTimeout = 0;
    }
    sendProduceRequests(batches, now);

    // if some partitions are already ready to be sent, the select time would be 0;
    // otherwise if some partition already has some data accumulated but not ready yet,
    // the select time will be the time difference between now and its linger expiry time;
    // otherwise the select time will be the time difference between now and the metadata expiry time;
    this.client.poll(pollTimeout, now);
}
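The poll-timeout selection can be read in isolation. A minimal sketch distilling the rule (choosePollTimeout is an illustrative name, not part of Sender):

// Illustrative only: how long the sender sleeps in poll().
static long choosePollTimeout(long nextReadyCheckDelayMs, long notReadyTimeout, boolean anyNodeReady) {
    if (anyNodeReady)
        return 0; // sendable data exists, so loop again immediately
    // otherwise wake up when the next batch becomes sendable (linger/backoff expiry)
    // or when a not-yet-ready connection is expected to become usable
    return Math.min(nextReadyCheckDelayMs, notReadyTimeout);
}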