use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
the class ConsumerCoordinatorTest method setup.
@Before
public void setup() {
    this.time = new MockTime();
    this.subscriptions = new SubscriptionState(OffsetResetStrategy.EARLIEST);
    this.metadata = new Metadata(0, Long.MAX_VALUE, true);
    this.metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    this.client = new MockClient(time, metadata);
    this.consumerClient = new ConsumerNetworkClient(new LogContext(), client, metadata, time, 100, 1000, Integer.MAX_VALUE);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();
    this.mockOffsetCommitCallback = new MockCommitCallback();
    this.partitionAssignor.clear();
    client.setNode(node);
    this.coordinator = buildCoordinator(metrics, assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true);
}
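The ConsumerNetworkClient constructor above takes a LogContext. As a minimal sketch of what that object provides, assuming only the public org.apache.kafka.common.utils.LogContext API (the prefix string here is made up for illustration): it builds slf4j Loggers that prepend a fixed prefix to every message, which Kafka clients use to tag log lines with identifiers such as the client id.

import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

// Hypothetical prefix; real clients use something like "[Consumer clientId=..., groupId=...] ".
LogContext logContext = new LogContext("[Consumer clientId=test-client] ");
Logger log = logContext.logger(ConsumerNetworkClient.class);
// Every message logged through this Logger is prepended with the prefix above.
log.debug("Coordinator lookup started");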
use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
the class ConsumerNetworkClientTest method blockWhenPollConditionNotSatisfied.
@Test
public void blockWhenPollConditionNotSatisfied() {
    long timeout = 4000L;
    NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class);
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(new LogContext(), mockNetworkClient, metadata, time, 100, 1000, Integer.MAX_VALUE);
    EasyMock.expect(mockNetworkClient.inFlightRequestCount()).andReturn(1);
    EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(timeout), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList());
    EasyMock.replay(mockNetworkClient);
    consumerClient.poll(timeout, time.milliseconds(), new ConsumerNetworkClient.PollCondition() {
        @Override
        public boolean shouldBlock() {
            return true;
        }
    });
    EasyMock.verify(mockNetworkClient);
}
use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
the class ConsumerNetworkClientTest method doNotBlockIfPollConditionIsSatisfied.
@Test
public void doNotBlockIfPollConditionIsSatisfied() {
    NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class);
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(new LogContext(), mockNetworkClient, metadata, time, 100, 1000, Integer.MAX_VALUE);
    // expect poll, but with no timeout
    EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(0L), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList());
    EasyMock.replay(mockNetworkClient);
    consumerClient.poll(Long.MAX_VALUE, time.milliseconds(), new ConsumerNetworkClient.PollCondition() {
        @Override
        public boolean shouldBlock() {
            return false;
        }
    });
    EasyMock.verify(mockNetworkClient);
}
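Taken together, the two tests above pin down the contract of the PollCondition overload: when shouldBlock() returns true the client blocks for up to the supplied timeout, and when it returns false it polls with a zero timeout. A minimal sketch of how calling code might use it, where the AtomicBoolean flag is hypothetical and stands in for whatever pending result the caller is waiting on:

import java.util.concurrent.atomic.AtomicBoolean;

final AtomicBoolean resultReady = new AtomicBoolean(false);
// Block in poll() for up to 5 seconds, but only while the result is still pending.
consumerClient.poll(5000L, time.milliseconds(), new ConsumerNetworkClient.PollCondition() {
    @Override
    public boolean shouldBlock() {
        return !resultReady.get();
    }
});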
use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
the class FetcherTest method testQuotaMetrics.
/*
 * Send multiple requests. Verify that the client-side quota metrics have the right values.
 */
@Test
public void testQuotaMetrics() throws Exception {
    MockSelector selector = new MockSelector(time);
    Sensor throttleTimeSensor = Fetcher.throttleTimeSensor(metrics, metricsRegistry);
    Cluster cluster = TestUtils.singletonCluster("test", 1);
    Node node = cluster.nodes().get(0);
    NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE,
            1000, 1000, 64 * 1024, 64 * 1024, 1000, time, true, new ApiVersions(), throttleTimeSensor, new LogContext());
    short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion();
    ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE)
            .serialize(apiVersionsResponseVersion, new ResponseHeader(0));
    selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer)));
    while (!client.ready(node, time.milliseconds()))
        client.poll(1, time.milliseconds());
    selector.clear();
    for (int i = 1; i <= 3; i++) {
        int throttleTimeMs = 100 * i;
        FetchRequest.Builder builder = FetchRequest.Builder.forConsumer(100, 100, new LinkedHashMap<TopicPartition, PartitionData>());
        ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null);
        client.send(request, time.milliseconds());
        client.poll(1, time.milliseconds());
        FetchResponse response = fullFetchResponse(tp0, nextRecords, Errors.NONE, i, throttleTimeMs);
        buffer = response.serialize(ApiKeys.FETCH.latestVersion(), new ResponseHeader(request.correlationId()));
        selector.completeReceive(new NetworkReceive(node.idString(), buffer));
        client.poll(1, time.milliseconds());
        selector.clear();
    }
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric avgMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeAvg));
    KafkaMetric maxMetric = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchThrottleTimeMax));
    // Throttle times are ApiVersions=400, Fetch=(100, 200, 300)
    assertEquals(250, avgMetric.value(), EPSILON);
    assertEquals(400, maxMetric.value(), EPSILON);
    client.close();
}
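The expected values follow from the four throttle times recorded on the sensor: 400 ms from the ApiVersions response and 100/200/300 ms from the three fetches, so the average is (400 + 100 + 200 + 300) / 4 = 250 and the maximum is 400. A standalone sketch of the same Avg/Max arithmetic using only the public Metrics API (sensor, metric, and group names here are made up):

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

Metrics metrics = new Metrics();
Sensor throttleTime = metrics.sensor("fetch-throttle-time");
MetricName avgName = metrics.metricName("fetch-throttle-time-avg", "consumer-fetch");
MetricName maxName = metrics.metricName("fetch-throttle-time-max", "consumer-fetch");
throttleTime.add(avgName, new Avg());
throttleTime.add(maxName, new Max());

// Record the same throttle times as the test: ApiVersions=400, Fetch=100, 200, 300.
throttleTime.record(400);
throttleTime.record(100);
throttleTime.record(200);
throttleTime.record(300);

double avg = metrics.metrics().get(avgName).value(); // (400 + 100 + 200 + 300) / 4 = 250.0
double max = metrics.metrics().get(maxName).value(); // 400.0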
use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
the class SelectorTest method testImmediatelyConnectedCleaned.
@Test
public void testImmediatelyConnectedCleaned() throws Exception {
    // new metrics object to avoid metric registration conflicts
    Metrics metrics = new Metrics();
    Selector selector = new Selector(5000, metrics, time, "MetricGroup", channelBuilder, new LogContext()) {
        @Override
        protected boolean doConnect(SocketChannel channel, InetSocketAddress address) throws IOException {
            // Use a blocking connect to trigger the immediately connected path
            channel.configureBlocking(true);
            boolean connected = super.doConnect(channel, address);
            channel.configureBlocking(false);
            return connected;
        }
    };
    try {
        testImmediatelyConnectedCleaned(selector, true);
        testImmediatelyConnectedCleaned(selector, false);
    } finally {
        selector.close();
        metrics.close();
    }
}
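The overridden doConnect forces the "immediately connected" code path: with a blocking connect, SocketChannel.connect returns only once the connection is fully established, so the Selector never sees a pending OP_CONNECT that has to finish later. A minimal plain-NIO sketch of that effect, with an illustrative localhost server and ephemeral port (error handling omitted):

import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;

ServerSocketChannel server = ServerSocketChannel.open();
server.bind(new InetSocketAddress("localhost", 0)); // any free port

SocketChannel channel = SocketChannel.open();
channel.configureBlocking(true);
// Blocking connect: returns true only after the connection is established,
// which is exactly the "immediately connected" case the test exercises.
boolean connected = channel.connect(server.getLocalAddress());
channel.configureBlocking(false);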