Example usage of org.apache.kafka.common.utils.Time from the Apache Kafka project:
the KafkaProducerTest method testTopicExpiryInMetadata.
// Verifies that a topic idle for longer than metadata.max.idle.ms is dropped from the
// producer's metadata, so a later partitionsFor() call blocks waiting for a refresh and
// eventually times out once max.block.ms of (mock) time has elapsed.
@Test
public void testTopicExpiryInMetadata() throws InterruptedException {
Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "30000");
long refreshBackoffMs = 500L;
long metadataExpireMs = 60000L;
// Idle threshold: advancing mock time past this should expire the topic below.
long metadataIdleMs = 60000L;
final Time time = new MockTime();
final ProducerMetadata metadata = new ProducerMetadata(refreshBackoffMs, metadataExpireMs, metadataIdleMs, new LogContext(), new ClusterResourceListeners(), time);
final String topic = "topic";
try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, new MockClient(time, metadata), null, time)) {
// Each numbered exchange() below pairs with the identically numbered one on the main thread.
Exchanger<Void> exchanger = new Exchanger<>();
Thread t = new Thread(() -> {
try {
// 1
exchanger.exchange(null);
// Spin (real time) until the producer asks for a metadata update, then serve it.
while (!metadata.updateRequested()) Thread.sleep(100);
MetadataResponse updateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
metadata.updateWithCurrentRequestVersion(updateResponse, false, time.milliseconds());
// 2
exchanger.exchange(null);
// Advance mock time well past metadataIdleMs so the topic is treated as idle.
time.sleep(120 * 1000L);
// Update the metadata again, but it should be expired at this point.
updateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
metadata.updateWithCurrentRequestVersion(updateResponse, false, time.milliseconds());
// 3
exchanger.exchange(null);
while (!metadata.updateRequested()) Thread.sleep(100);
// Advance mock time by max.block.ms so the pending partitionsFor() call times out.
time.sleep(30 * 1000L);
} catch (Exception e) {
// NOTE(review): InterruptedException is wrapped without re-interrupting; tolerable in a
// test helper thread, but a failure here only surfaces as an uncaught exception.
throw new RuntimeException(e);
}
});
t.start();
// 1
exchanger.exchange(null);
// Topic is present after the first served update, so this succeeds.
assertNotNull(producer.partitionsFor(topic));
// 2
exchanger.exchange(null);
// 3
exchanger.exchange(null);
// The expired topic was dropped from the refreshed metadata, so this blocks until timeout.
assertThrows(TimeoutException.class, () -> producer.partitionsFor(topic));
t.join();
}
}
Example usage of org.apache.kafka.common.utils.Time from the Apache Kafka project:
the KafkaProducerTest method testFlushMeasureLatency.
// Verifies that the "flush-time-ns-total" metric accumulates: it is positive after the
// first flush() and strictly larger after a second one.
@Test
public void testFlushMeasureLatency() {
    Map<String, Object> producerConfigs = new HashMap<>();
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    // MockTime auto-advances 1 ms per read, so each flush observes elapsed time.
    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)));

    try (KafkaProducer<String, String> producer = kafkaProducer(producerConfigs, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        // First flush must record a positive cumulative latency ...
        producer.flush();
        double totalAfterFirstFlush = getMetricValue(producer, "flush-time-ns-total");
        assertTrue(totalAfterFirstFlush > 0);
        // ... and a second flush must strictly increase the running total.
        producer.flush();
        assertTrue(getMetricValue(producer, "flush-time-ns-total") > totalAfterFirstFlush);
    }
}
Example usage of org.apache.kafka.common.utils.Time from the Apache Kafka project:
the KafkaProducerTest method testFlushCompleteSendOfInflightBatches.
// Verifies that flush() completes every in-flight send: none of the returned futures is
// done before the flush, and all of them are done once flush() returns.
@Test
public void testFlushCompleteSendOfInflightBatches() {
    Map<String, Object> producerConfigs = new HashMap<>();
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    // Idempotence is disabled purely to keep the wire exchange simple;
    // flush behaves the same whether idempotence is enabled or not.
    producerConfigs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);

    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)));

    try (Producer<String, String> producer = kafkaProducer(producerConfigs, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        final int recordCount = 50;
        ArrayList<Future<RecordMetadata>> pendingSends = new ArrayList<>(recordCount);
        for (int i = 0; i < recordCount; i++) {
            pendingSends.add(producer.send(new ProducerRecord<>("topic", "value" + i)));
        }
        // Nothing should have completed before the flush ...
        for (Future<RecordMetadata> pending : pendingSends) {
            assertFalse(pending.isDone());
        }
        producer.flush();
        // ... and everything should have completed after it.
        for (Future<RecordMetadata> pending : pendingSends) {
            assertTrue(pending.isDone());
        }
    }
}
Example usage of org.apache.kafka.common.utils.Time from the Apache Kafka project:
the KafkaProducerTest method testSendTxnOffsetsWithGroupMetadata.
// Verifies that sendOffsetsToTransaction forwards the full consumer group metadata
// (group id, member id, generation id, group instance id) in the TxnOffsetCommit request.
@Test
public void testSendTxnOffsetsWithGroupMetadata() {
// Cap the advertised TxnOffsetCommit version at v3 — presumably to exercise the
// version range carrying the extended group fields; confirm against the protocol docs.
final short maxVersion = (short) 3;
Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000);
configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
Time time = new MockTime(1);
MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
MockClient client = new MockClient(time, metadata);
client.updateMetadata(initialUpdateResponse);
client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.TXN_OFFSET_COMMIT.id, (short) 0, maxVersion));
Node node = metadata.fetch().nodes().get(0);
// Throttle the broker node; the transactional flow must still complete despite the delay.
client.throttle(node, 5000);
// Canned responses, consumed in order by the transactional calls in the try block below.
client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
client.prepareResponse(addOffsetsToTxnResponse(Errors.NONE));
client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
String groupId = "group";
String memberId = "member";
int generationId = 5;
String groupInstanceId = "instance";
// The matcher is the actual assertion of this test: the outgoing TxnOffsetCommit request
// must carry exactly the group metadata supplied to sendOffsetsToTransaction below.
client.prepareResponse(request -> {
TxnOffsetCommitRequestData data = ((TxnOffsetCommitRequest) request).data();
return data.groupId().equals(groupId) && data.memberId().equals(memberId) && data.generationId() == generationId && data.groupInstanceId().equals(groupInstanceId);
}, txnOffsetsCommitResponse(Collections.singletonMap(new TopicPartition("topic", 0), Errors.NONE)));
client.prepareResponse(endTxnResponse(Errors.NONE));
try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time)) {
producer.initTransactions();
producer.beginTransaction();
ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata(groupId, generationId, memberId, Optional.of(groupInstanceId));
producer.sendOffsetsToTransaction(Collections.emptyMap(), groupMetadata);
producer.commitTransaction();
}
}
Example usage of org.apache.kafka.common.utils.Time from the Apache Kafka project:
the KafkaProducerTest method testInitTransactionTimeout.
// Verifies that initTransactions() throws TimeoutException when the broker never answers
// InitProducerId within max.block.ms, and that a subsequent retry can still succeed.
@Test
public void testInitTransactionTimeout() {
    Map<String, Object> producerConfigs = new HashMap<>();
    producerConfigs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "bad-transaction");
    // Short max.block.ms so the first initTransactions() attempt times out quickly.
    producerConfigs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 500);
    producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    producerMetadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)), false, mockTime.milliseconds());
    MockClient mockClient = new MockClient(mockTime, producerMetadata);

    try (Producer<String, String> producer = kafkaProducer(producerConfigs, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        // Only the transaction-coordinator lookup is answered; InitProducerId is withheld,
        // so the first call exceeds max.block.ms and fails.
        mockClient.prepareResponse(request -> request instanceof FindCoordinatorRequest && ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(), FindCoordinatorResponse.prepareResponse(Errors.NONE, "bad-transaction", NODE));
        assertThrows(TimeoutException.class, producer::initTransactions);

        // With both responses queued, the retry must succeed — a timeout does not
        // permanently poison the producer's transactional state.
        mockClient.prepareResponse(request -> request instanceof FindCoordinatorRequest && ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(), FindCoordinatorResponse.prepareResponse(Errors.NONE, "bad-transaction", NODE));
        mockClient.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
        producer.initTransactions();
    }
}
End of aggregated examples.