Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
Class KafkaConsumerTest, method newConsumer:
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, boolean autoCommitEnabled, int autoCommitIntervalMs) {
    // create a consumer with mocked time and mocked network
    String clientId = "mock-consumer";
    String groupId = "mock-group";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    long requestTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    OffsetResetStrategy autoResetStrategy = OffsetResetStrategy.EARLIEST;
    List<PartitionAssignor> assignors = Arrays.asList(assignor);
    ConsumerInterceptors<String, String> interceptors = null;
    Metrics metrics = new Metrics();
    SubscriptionState subscriptions = new SubscriptionState(autoResetStrategy);
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(client, metadata, time, retryBackoffMs, requestTimeoutMs);
    ConsumerCoordinator consumerCoordinator = new ConsumerCoordinator(consumerClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, assignors, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs, autoCommitEnabled, autoCommitIntervalMs, interceptors, excludeInternalTopics);
    Fetcher<String, String> fetcher = new Fetcher<>(consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, keyDeserializer, valueDeserializer, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs);
    return new KafkaConsumer<>(clientId, consumerCoordinator, keyDeserializer, valueDeserializer, fetcher, interceptors, time, consumerClient, metrics, subscriptions, metadata, retryBackoffMs, requestTimeoutMs);
}
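All of the components built here (coordinator, fetcher, network client) share the single Metrics instance, so their sensors land in one registry. As a reminder of the registry's basic pattern, here is a minimal sketch adapted from the pattern in the Metrics class's own javadoc; the sensor name, group, and recorded value are illustrative, not anything Kafka itself registers:

import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class MetricsUsageSketch {
    public static void main(String[] args) {
        // the registry that owns all sensors and metrics
        Metrics metrics = new Metrics();
        // a sensor is the point where values are recorded
        Sensor sensor = metrics.sensor("message-sizes");
        // attach stats that are computed from the recorded values
        sensor.add(metrics.metricName("message-size-avg", "example-group", "average message size"), new Avg());
        sensor.add(metrics.metricName("message-size-max", "example-group", "maximum message size"), new Max());
        // recording a value updates every attached stat
        sensor.record(1024.0);
        metrics.close();
    }
}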
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
Class SenderTest, method setup:
@Before
public void setup() {
    Map<String, String> metricTags = new LinkedHashMap<>();
    metricTags.put("client-id", CLIENT_ID);
    MetricConfig metricConfig = new MetricConfig().tags(metricTags);
    metrics = new Metrics(metricConfig, time);
    accumulator = new RecordAccumulator(batchSize, 1024 * 1024, CompressionType.NONE, 0L, 0L, metrics, time);
    sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, metrics, time, REQUEST_TIMEOUT);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
}
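Beyond tags, MetricConfig also controls how windowed stats are sampled. A minimal sketch of the other knobs, with arbitrary illustrative values:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;

public class MetricConfigSketch {
    public static void main(String[] args) {
        Map<String, String> tags = new LinkedHashMap<>();
        tags.put("client-id", "my-client"); // added to every metric created from this registry
        MetricConfig config = new MetricConfig()
            .tags(tags)
            .samples(2)                          // samples kept per windowed stat
            .timeWindow(30, TimeUnit.SECONDS);   // length of each sample window
        Metrics metrics = new Metrics(config);
        metrics.close();
    }
}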
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
Class SenderTest, method testSendInOrder:
@Test
public void testSendInOrder() throws Exception {
    int maxRetries = 1;
    Metrics m = new Metrics();
    try {
        Sender sender = new Sender(client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT);
        // Create a two-broker cluster, with partition 0 on broker 0 and partition 1 on broker 1
        Cluster cluster1 = TestUtils.clusterWith(2, "test", 2);
        metadata.update(cluster1, Collections.<String>emptySet(), time.milliseconds());
        // Send the first message.
        TopicPartition tp2 = new TopicPartition("test", 1);
        accumulator.append(tp2, 0L, "key1".getBytes(), "value1".getBytes(), null, MAX_BLOCK_TIMEOUT);
        // connect
        sender.run(time.milliseconds());
        // send produce request
        sender.run(time.milliseconds());
        String id = client.requests().peek().destination();
        assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey());
        Node node = new Node(Integer.parseInt(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));
        time.sleep(900);
        // Now send another message to tp2
        accumulator.append(tp2, 0L, "key2".getBytes(), "value2".getBytes(), null, MAX_BLOCK_TIMEOUT);
        // Update metadata before the sender receives the response from broker 1. Now tp2's leader moves to broker 0.
        Cluster cluster2 = TestUtils.singletonCluster("test", 2);
        metadata.update(cluster2, Collections.<String>emptySet(), time.milliseconds());
        // The sender must not send the second message to node 0 while the first is still in flight, preserving per-partition ordering.
        sender.run(time.milliseconds());
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
    } finally {
        m.close();
    }
}
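The try/finally ensures the registry is cleaned up even when an assertion fails. Since Metrics implements java.io.Closeable, try-with-resources is an equivalent, more compact form; a minimal standalone sketch:

import org.apache.kafka.common.metrics.Metrics;

public class MetricsCloseSketch {
    public static void main(String[] args) {
        // Metrics implements Closeable, so try-with-resources gives the
        // same cleanup guarantee as the try/finally in the test above
        try (Metrics m = new Metrics()) {
            m.sensor("example-sensor").record(1.0);
        } // close() shuts down any registered metrics reporters
    }
}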
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
Class SenderTest, method testRetries:
@Test
public void testRetries() throws Exception {
    // create a sender with retries = 1
    int maxRetries = 1;
    Metrics m = new Metrics();
    try {
        Sender sender = new Sender(client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, m, time, REQUEST_TIMEOUT);
        // do a successful retry
        Future<RecordMetadata> future = accumulator.append(tp, 0L, "key".getBytes(), "value".getBytes(), null, MAX_BLOCK_TIMEOUT).future;
        // connect
        sender.run(time.milliseconds());
        // send produce request
        sender.run(time.milliseconds());
        String id = client.requests().peek().destination();
        Node node = new Node(Integer.parseInt(id), "localhost", 0);
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        assertTrue("Client ready status should be true", client.isReady(node, 0L));
        client.disconnect(id);
        assertEquals(0, client.inFlightRequestCount());
        assertFalse(client.hasInFlightRequests());
        assertFalse("Client ready status should be false", client.isReady(node, 0L));
        // receive error
        sender.run(time.milliseconds());
        // reconnect
        sender.run(time.milliseconds());
        // resend
        sender.run(time.milliseconds());
        assertEquals(1, client.inFlightRequestCount());
        assertTrue(client.hasInFlightRequests());
        long offset = 0;
        client.respond(produceResponse(tp, offset, Errors.NONE, 0));
        sender.run(time.milliseconds());
        assertTrue("Request should have retried and completed", future.isDone());
        assertEquals(offset, future.get().offset());
        // do an unsuccessful retry
        future = accumulator.append(tp, 0L, "key".getBytes(), "value".getBytes(), null, MAX_BLOCK_TIMEOUT).future;
        // send produce request
        sender.run(time.milliseconds());
        for (int i = 0; i < maxRetries + 1; i++) {
            client.disconnect(client.requests().peek().destination());
            // receive error
            sender.run(time.milliseconds());
            // reconnect
            sender.run(time.milliseconds());
            // resend
            sender.run(time.milliseconds());
        }
        sender.run(time.milliseconds());
        completedWithError(future, Errors.NETWORK_EXCEPTION);
    } finally {
        m.close();
    }
}
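A test like this can also read values back out of the registry to assert on what the sender recorded. A sketch of the generic lookup pattern; the sensor and metric names are illustrative, and KafkaMetric.value() is the accessor in the clients version these tests come from (later versions use metricValue()):

import java.util.Map;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.stats.Count;

public class MetricsLookupSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        // register a counting stat on an illustrative sensor
        metrics.sensor("requests").add(metrics.metricName("request-count", "example-group"), new Count());
        metrics.sensor("requests").record(); // sensor(name) returns the existing sensor on reuse
        // metrics() exposes the whole registry as a Map<MetricName, KafkaMetric>
        for (Map.Entry<MetricName, KafkaMetric> entry : metrics.metrics().entrySet())
            System.out.println(entry.getKey().name() + " = " + entry.getValue().value());
        metrics.close();
    }
}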
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
Class FetcherTest, method testFetchedRecordsRaisesOnSerializationErrors:
@Test
public void testFetchedRecordsRaisesOnSerializationErrors() {
    // raise an exception from somewhere in the middle of the fetch response
    // so that we can verify that our position does not advance after raising
    ByteArrayDeserializer deserializer = new ByteArrayDeserializer() {
        int i = 0;

        @Override
        public byte[] deserialize(String topic, byte[] data) {
            if (i++ == 1)
                throw new SerializationException();
            return data;
        }
    };
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time), deserializer, deserializer);
    subscriptions.assignFromUser(singleton(tp));
    subscriptions.seek(tp, 1);
    client.prepareResponse(matchesOffset(tp, 1), fetchResponse(this.records, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    try {
        fetcher.fetchedRecords();
        fail("fetchedRecords should have raised");
    } catch (SerializationException e) {
        // the position should not advance since no data has been returned
        assertEquals(1, subscriptions.position(tp).longValue());
    }
}
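Note that the test passes its mocked clock into new Metrics(time). That matters for windowed stats such as rates and averages, which divide by elapsed time; with a mocked clock the test controls the window deterministically instead of sleeping for real. A minimal sketch, assuming the MockTime test utility from org.apache.kafka.common.utils:

import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Rate;
import org.apache.kafka.common.utils.MockTime;

public class MockTimeMetricsSketch {
    public static void main(String[] args) {
        MockTime time = new MockTime();
        // windowed stats read this clock, so the test controls elapsed time
        Metrics metrics = new Metrics(time);
        Sensor sensor = metrics.sensor("bytes-sent");
        sensor.add(metrics.metricName("byte-rate", "example-group"), new Rate());
        sensor.record(1000.0);
        time.sleep(1000); // advance the mocked clock by one second, instantly
        KafkaMetric rate = metrics.metrics().get(metrics.metricName("byte-rate", "example-group"));
        System.out.println(rate.value()); // rate computed over the mocked elapsed window
        metrics.close();
    }
}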