Example usage of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testInterceptorPartitionSetOnTooLargeRecord.
@Test
public void testInterceptorPartitionSetOnTooLargeRecord() {
    // Configure a producer whose max request size is a single byte, so every record
    // is rejected as "too large" before it ever reaches the wire.
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "1");

    String topic = "topic";
    long now = Time.SYSTEM.milliseconds();
    ProducerMetadata producerMetadata = newMetadata(0, 90000);
    producerMetadata.add(topic, now);
    MetadataResponse bootstrapMetadata = RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1));
    producerMetadata.updateWithCurrentRequestVersion(bootstrapMetadata, false, now);

    // it is safe to suppress, since this is a mock class
    @SuppressWarnings("unchecked")
    ProducerInterceptors<String, String> interceptors = mock(ProducerInterceptors.class);
    when(interceptors.onSend(any())).then(invocation -> invocation.getArgument(0));

    KafkaProducer<String, String> producer = kafkaProducer(props, new StringSerializer(),
        new StringSerializer(), producerMetadata, null, interceptors, Time.SYSTEM);

    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "value");
    producer.send(record);

    // The interceptor chain must observe both the send attempt and the resulting error.
    verify(interceptors).onSend(record);
    verify(interceptors).onSendError(eq(record), notNull(), notNull());
    producer.close(Duration.ofMillis(0));
}
Example usage of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testSendTxnOffsetsWithGroupId.
@Test
public void testSendTxnOffsetsWithGroupId() {
    // Transactional producer whose TxnOffsetCommit request must carry the caller-supplied group id.
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)));

    Node broker = producerMetadata.fetch().nodes().get(0);
    mockClient.throttle(broker, 5000);

    // Canned broker responses for the full transactional flow:
    // coordinator lookup -> producer id init -> add offsets -> coordinator lookup -> offset commit -> end txn.
    mockClient.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    mockClient.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    mockClient.prepareResponse(addOffsetsToTxnResponse(Errors.NONE));
    mockClient.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));

    String groupId = "group";
    // The matcher verifies the outgoing TxnOffsetCommitRequest was built with the expected group id.
    mockClient.prepareResponse(
        request -> ((TxnOffsetCommitRequest) request).data().groupId().equals(groupId),
        txnOffsetsCommitResponse(Collections.singletonMap(new TopicPartition("topic", 0), Errors.NONE)));
    mockClient.prepareResponse(endTxnResponse(Errors.NONE));

    try (Producer<String, String> producer = kafkaProducer(props, new StringSerializer(),
            new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        producer.initTransactions();
        producer.beginTransaction();
        producer.sendOffsetsToTransaction(Collections.emptyMap(), new ConsumerGroupMetadata(groupId));
        producer.commitTransaction();
    }
}
Example usage of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, helper method doTestHeaders.
private <T extends Serializer<String>> void doTestHeaders(Class<T> serializerClassToMock) {
    // Verify that record headers are passed to the serializers and become read-only after send().
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");

    Serializer<String> mockKeySerializer = mock(serializerClassToMock);
    Serializer<String> mockValueSerializer = mock(serializerClassToMock);
    when(mockKeySerializer.serialize(any(), any(), any()))
        .then(invocation -> invocation.<String>getArgument(2).getBytes());
    when(mockValueSerializer.serialize(any(), any(), any()))
        .then(invocation -> invocation.<String>getArgument(2).getBytes());

    String topic = "topic";
    long now = Time.SYSTEM.milliseconds();
    ProducerMetadata producerMetadata = newMetadata(0, 90000);
    producerMetadata.add(topic, now);
    producerMetadata.updateWithCurrentRequestVersion(
        RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1)), false, now);

    KafkaProducer<String, String> producer = kafkaProducer(props, mockKeySerializer,
        mockValueSerializer, producerMetadata, null, null, Time.SYSTEM);

    String key = "key";
    String value = "value";
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, key, value);

    // ensure headers can be mutated pre send.
    record.headers().add(new RecordHeader("test", "header2".getBytes()));
    producer.send(record, null);

    // ensure headers are closed and cannot be mutated post send
    assertThrows(IllegalStateException.class,
        () -> record.headers().add(new RecordHeader("test", "test".getBytes())));
    // ensure existing headers are not changed, and last header for key is still original value
    assertArrayEquals(record.headers().lastHeader("test").value(), "header2".getBytes());

    verify(mockValueSerializer).serialize(topic, record.headers(), value);
    verify(mockKeySerializer).serialize(topic, record.headers(), key);
    producer.close(Duration.ofMillis(0));
}
Example usage of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testCloseIsForcedOnPendingAddOffsetRequest.
@Test
public void testCloseIsForcedOnPendingAddOffsetRequest() throws InterruptedException {
    // Verify that close(timeout) forcibly aborts a blocked initTransactions() call
    // (only the FindCoordinator response is prepared, so initTransactions cannot complete).
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "this-is-a-transactional-id");

    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("testTopic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());
    MockClient client = new MockClient(time, metadata);

    Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
        new StringSerializer(), metadata, client, null, time);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    CountDownLatch assertionDoneLatch = new CountDownLatch(1);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "this-is-a-transactional-id", NODE));
    try {
        executorService.submit(() -> {
            // initTransactions blocks waiting for the rest of the handshake; the forced
            // close below must abort it with a KafkaException.
            assertThrows(KafkaException.class, producer::initTransactions);
            assertionDoneLatch.countDown();
        });
        client.waitForRequests(1, 2000);
        producer.close(Duration.ofMillis(1000));
        // Fail loudly on timeout: previously the boolean result of await() was discarded,
        // so the test could pass even if the background assertion never ran.
        if (!assertionDoneLatch.await(5000, TimeUnit.MILLISECONDS)) {
            throw new AssertionError("initTransactions was not aborted by the forced close within 5s");
        }
    } finally {
        // Previously leaked: without shutdown the single-thread executor outlived the test.
        executorService.shutdownNow();
    }
}
Example usage of org.apache.kafka.clients.producer.internals.ProducerMetadata in the Apache Kafka project:
class KafkaProducerTest, method testTopicRefreshInMetadata.
@Test
public void testTopicRefreshInMetadata() throws InterruptedException {
    // partitionsFor() must eventually time out when the broker keeps reporting the topic as unknown.
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "600000");
    // test under normal producer for simplicity
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);

    final long refreshBackoffMs = 500L;
    final long metadataExpireMs = 60000L;
    final long metadataIdleMs = 60000L;
    final Time mockTime = new MockTime();
    final ProducerMetadata producerMetadata = new ProducerMetadata(refreshBackoffMs,
        metadataExpireMs, metadataIdleMs, new LogContext(), new ClusterResourceListeners(), mockTime);
    final String topic = "topic";

    try (KafkaProducer<String, String> producer = kafkaProducer(props, new StringSerializer(),
            new StringSerializer(), producerMetadata, new MockClient(mockTime, producerMetadata), null, mockTime)) {
        AtomicBoolean keepUpdating = new AtomicBoolean(true);
        // Background updater: whenever the producer asks for metadata, answer with
        // UNKNOWN_TOPIC_OR_PARTITION and advance mock time so max.block.ms is eventually exceeded.
        Thread updater = new Thread(() -> {
            long startMs = System.currentTimeMillis();
            while (keepUpdating.get()) {
                while (!producerMetadata.updateRequested() && System.currentTimeMillis() - startMs < 100)
                    Thread.yield();
                MetadataResponse unknownTopicResponse = RequestTestUtils.metadataUpdateWith(
                    "kafka-cluster", 1, singletonMap(topic, Errors.UNKNOWN_TOPIC_OR_PARTITION), emptyMap());
                producerMetadata.updateWithCurrentRequestVersion(unknownTopicResponse, false, mockTime.milliseconds());
                mockTime.sleep(60 * 1000L);
            }
        });
        updater.start();

        assertThrows(TimeoutException.class, () -> producer.partitionsFor(topic));

        keepUpdating.set(false);
        updater.join();
    }
}
Aggregations