Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project:
class KafkaConsumerTest, method testEnforceRebalanceTriggersRebalanceOnNextPoll.
@Test
public void testEnforceRebalanceTriggersRebalanceOnNextPoll() {
    // Verifies that enforceRebalance() forces a new rebalance on the subsequent poll,
    // observed via the rebalance listener's revoked/assigned counters.
    Time time = new MockTime(1L);
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, true, groupInstanceId);
    MockRebalanceListener countingRebalanceListener = new MockRebalanceListener();
    initMetadata(client, Utils.mkMap(Utils.mkEntry(topic, 1), Utils.mkEntry(topic2, 1), Utils.mkEntry(topic3, 1)));

    consumer.subscribe(Arrays.asList(topic, topic2), countingRebalanceListener);
    Node node = metadata.fetch().nodes().get(0);
    prepareRebalance(client, node, assignor, Arrays.asList(tp0, t2p0), null);

    // A first rebalance to get the assignment; we need two poll calls since we need
    // two round trips to finish join / sync-group.
    consumer.poll(Duration.ZERO);
    consumer.poll(Duration.ZERO);

    // onPartitionsRevoked is not invoked when first joining the group.
    // Note: assertEquals takes the EXPECTED value first — the original had the
    // arguments inverted, which would produce misleading failure messages.
    assertEquals(0, countingRebalanceListener.revokedCount);
    assertEquals(1, countingRebalanceListener.assignedCount);

    consumer.enforceRebalance();

    // The next poll should trigger a rebalance, revoking the current assignment.
    consumer.poll(Duration.ZERO);
    assertEquals(1, countingRebalanceListener.revokedCount);
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project:
class KTableSuppressProcessorMetricsTest, method shouldRecordMetricsWithBuiltInMetricsVersionLatest.
@Test
public void shouldRecordMetricsWithBuiltInMetricsVersionLatest() {
// Verifies that the suppress processor records eviction and buffer-size/count metrics
// under the latest built-in metrics version. The expected averages/maxima below depend
// on the exact serialized byte size of the buffered records, so the literal values are
// intentionally precise — do not change them without recomputing the record sizes.
final String storeName = "test-store";
// In-memory, logging-disabled buffer backing the suppress processor.
final StateStore buffer = new InMemoryTimeOrderedKeyValueBuffer.Builder<>(storeName, Serdes.String(), Serdes.Long()).withLoggingDisabled().build();
final KTableImpl<String, ?, Long> mock = EasyMock.mock(KTableImpl.class);
// Suppression configured with a long time limit (100 days) and maxRecords(1), so the
// SECOND record forces an eviction of the first.
final Processor<String, Change<Long>, String, Change<Long>> processor = new KTableSuppressProcessorSupplier<>((SuppressedInternal<String>) Suppressed.<String>untilTimeLimit(Duration.ofDays(100), maxRecords(1)), storeName, mock).get();
streamsConfig.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);
final MockInternalNewProcessorContext<String, Change<Long>> context = new MockInternalNewProcessorContext<>(streamsConfig, TASK_ID, TestUtils.tempDirectory());
final Time time = new SystemTime();
context.setCurrentNode(new ProcessorNode("testNode"));
context.setSystemTimeMs(time.milliseconds());
buffer.init((StateStoreContext) context, buffer);
processor.init(context);
final long timestamp = 100L;
context.setRecordMetadata("", 0, 0L);
context.setTimestamp(timestamp);
final String key = "longKey";
final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
// First record: buffered, no eviction yet.
processor.process(new Record<>(key, value, timestamp));
final MetricName evictionRateMetric = evictionRateMetricLatest;
final MetricName evictionTotalMetric = evictionTotalMetricLatest;
final MetricName bufferSizeAvgMetric = bufferSizeAvgMetricLatest;
final MetricName bufferSizeMaxMetric = bufferSizeMaxMetricLatest;
final MetricName bufferCountAvgMetric = bufferCountAvgMetricLatest;
final MetricName bufferCountMaxMetric = bufferCountMaxMetricLatest;
{
// After one record: zero evictions; avg buffer size is 21.5 because the average is
// taken over {0 bytes (empty), 43 bytes (one record)} — presumably the windowed avg
// includes the initial empty state (TODO confirm against metric window semantics).
final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
verifyMetric(metrics, evictionRateMetric, is(0.0));
verifyMetric(metrics, evictionTotalMetric, is(0.0));
verifyMetric(metrics, bufferSizeAvgMetric, is(21.5));
verifyMetric(metrics, bufferSizeMaxMetric, is(43.0));
verifyMetric(metrics, bufferCountAvgMetric, is(0.5));
verifyMetric(metrics, bufferCountMaxMetric, is(1.0));
}
// Second record with a later offset/timestamp: maxRecords(1) is exceeded, forcing
// an eviction of the earlier record.
context.setRecordMetadata("", 0, 1L);
context.setTimestamp(timestamp + 1);
processor.process(new Record<>("key", value, timestamp + 1));
{
// After the eviction: exactly one total eviction; buffer momentarily held two
// records (max 82 bytes / count 2) before settling back to one.
final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
verifyMetric(metrics, evictionRateMetric, greaterThan(0.0));
verifyMetric(metrics, evictionTotalMetric, is(1.0));
verifyMetric(metrics, bufferSizeAvgMetric, is(41.0));
verifyMetric(metrics, bufferSizeMaxMetric, is(82.0));
verifyMetric(metrics, bufferCountAvgMetric, is(1.0));
verifyMetric(metrics, bufferCountMaxMetric, is(2.0));
}
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project:
class KafkaProducerTest, method testTransactionalMethodThrowsWhenSenderClosed.
@Test
public void testTransactionalMethodThrowsWhenSenderClosed() {
    // A transactional method invoked after close() must fail with IllegalStateException.
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    producerProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "this-is-a-transactional-id");

    Time mockTime = new MockTime();
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MetadataResponse bootstrapResponse = RequestTestUtils.metadataUpdateWith(1, emptyMap());
    producerMetadata.updateWithCurrentRequestVersion(bootstrapResponse, false, mockTime.milliseconds());
    MockClient mockClient = new MockClient(mockTime, producerMetadata);

    Producer<String, String> producer =
        kafkaProducer(producerProps, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime);

    // Close the producer first, then verify the transactional API rejects the call.
    producer.close();
    assertThrows(IllegalStateException.class, producer::initTransactions);
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project:
class KafkaProducerTest, method testCallbackHandlesError.
@Test
public void testCallbackHandlesError() throws Exception {
    // Verifies the Callback.onCompletion contract on send failure: the callback
    // receives both a non-null exception AND a non-null RecordMetadata whose fields
    // carry the sentinel "no data" values.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "1000");
    Time time = new MockTime();
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient client = new MockClient(time, producerMetadata);

    // Invalid topic name due to the embedded space — the send must fail.
    String invalidTopicName = "topic abc";

    try (Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), producerMetadata, client, null, time)) {
        ProducerRecord<String, String> record = new ProducerRecord<>(invalidTopicName, "HelloKafka");
        // Here's the important piece of the test. Let's make sure that the RecordMetadata we get
        // is non-null and adheres to the onCompletion contract.
        Callback callBack = (recordMetadata, exception) -> {
            assertNotNull(exception);
            assertNotNull(recordMetadata);
            assertNotNull(recordMetadata.topic(), "Topic name should be valid even on send failure");
            assertEquals(invalidTopicName, recordMetadata.topic());
            // NOTE(review): the original also asserted assertNotNull(recordMetadata.partition(), ...),
            // but partition() returns a primitive int, so after autoboxing that assertion is
            // vacuously true; the meaningful check is the sentinel equality below.
            assertFalse(recordMetadata.hasOffset());
            assertEquals(ProduceResponse.INVALID_OFFSET, recordMetadata.offset());
            assertFalse(recordMetadata.hasTimestamp());
            assertEquals(RecordBatch.NO_TIMESTAMP, recordMetadata.timestamp());
            assertEquals(-1, recordMetadata.serializedKeySize());
            assertEquals(-1, recordMetadata.serializedValueSize());
            assertEquals(-1, recordMetadata.partition());
        };
        producer.send(record, callBack);
    }
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project:
class KafkaProducerTest, method testAbortTransaction.
@Test
public void testAbortTransaction() {
    // Happy-path abort: init -> begin -> abort should complete without throwing,
    // given canned coordinator / init-producer-id / end-txn responses.
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");

    Time mockTime = new MockTime(1);
    ProducerMetadata producerMetadata = newMetadata(0, Long.MAX_VALUE);
    MockClient mockClient = new MockClient(mockTime, producerMetadata);
    MetadataResponse bootstrapResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    mockClient.updateMetadata(bootstrapResponse);

    // Queue up the responses the transactional flow will request, in order.
    mockClient.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    mockClient.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    mockClient.prepareResponse(endTxnResponse(Errors.NONE));

    try (Producer<String, String> producer =
             kafkaProducer(producerProps, new StringSerializer(), new StringSerializer(), producerMetadata, mockClient, null, mockTime)) {
        producer.initTransactions();
        producer.beginTransaction();
        producer.abortTransaction();
    }
}
Aggregations: the examples above were collected automatically from the Apache Kafka test suite.