Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class ConsumerNetworkClientTest, method sendExpiry.
@Test
public void sendExpiry() throws InterruptedException {
    long unsentExpiryMs = 10;
    final AtomicBoolean isReady = new AtomicBoolean();
    final AtomicBoolean disconnected = new AtomicBoolean();
    client = new MockClient(time) {
        @Override
        public boolean ready(Node node, long now) {
            if (isReady.get())
                return super.ready(node, now);
            else
                return false;
        }

        @Override
        public boolean connectionFailed(Node node) {
            return disconnected.get();
        }
    };

    // Queue the first send, sleep long enough for it to expire, then queue a second send
    consumerClient = new ConsumerNetworkClient(new LogContext(), client, metadata, time, 100, unsentExpiryMs, Integer.MAX_VALUE);
    RequestFuture<ClientResponse> future1 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future1.isDone());

    time.sleep(unsentExpiryMs + 1);
    RequestFuture<ClientResponse> future2 = consumerClient.send(node, heartbeat());
    assertEquals(2, consumerClient.pendingRequestCount());
    assertEquals(2, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // The first send should have expired; the second send is still pending
    consumerClient.poll(0);
    assertTrue(future1.isDone());
    assertFalse(future1.succeeded());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());

    // Enable the send; the un-expired request should succeed on poll
    isReady.set(true);
    client.prepareResponse(heartbeatResponse(Errors.NONE));
    consumerClient.poll(future2);
    ClientResponse clientResponse = future2.value();
    HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody();
    assertEquals(Errors.NONE, response.error());

    // Disable the ready flag to delay the send and queue another request. A disconnection should remove the pending send.
    isReady.set(false);
    RequestFuture<ClientResponse> future3 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    disconnected.set(true);
    consumerClient.poll(0);
    assertTrue(future3.isDone());
    assertFalse(future3.succeeded());
    assertEquals(0, consumerClient.pendingRequestCount());
    assertEquals(0, consumerClient.pendingRequestCount(node));
}
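The test passes new LogContext() with no prefix, so nothing extra is prepended to the client's log lines. For context, here is a minimal sketch of what LogContext itself does, assuming an slf4j binding is on the classpath (the class name and prefix below are illustrative, not from the test):

import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

public class LogContextSketch {
    public static void main(String[] args) {
        // Every logger created from this context prepends the fixed prefix to its messages.
        LogContext logContext = new LogContext("[my-component id=42] ");
        Logger log = logContext.logger(LogContextSketch.class);
        log.info("started"); // logged as "[my-component id=42] started"
    }
}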
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class StreamThread, method create.
public static StreamThread create(final InternalTopologyBuilder builder,
                                  final StreamsConfig config,
                                  final KafkaClientSupplier clientSupplier,
                                  final AdminClient adminClient,
                                  final UUID processId,
                                  final String clientId,
                                  final Metrics metrics,
                                  final Time time,
                                  final StreamsMetadataState streamsMetadataState,
                                  final long cacheSizeBytes,
                                  final StateDirectory stateDirectory,
                                  final StateRestoreListener userStateRestoreListener) {
    final String threadClientId = clientId + "-StreamThread-" + STREAM_THREAD_ID_SEQUENCE.getAndIncrement();
    final String logPrefix = String.format("stream-thread [%s] ", threadClientId);
    final LogContext logContext = new LogContext(logPrefix);
    final Logger log = logContext.logger(StreamThread.class);

    log.info("Creating restore consumer client");
    final Map<String, Object> restoreConsumerConfigs = config.getRestoreConsumerConfigs(threadClientId);
    final Consumer<byte[], byte[]> restoreConsumer = clientSupplier.getRestoreConsumer(restoreConsumerConfigs);
    final StoreChangelogReader changelogReader = new StoreChangelogReader(restoreConsumer, userStateRestoreListener, logContext);

    // With exactly-once enabled, each task creates its own producer, so no shared thread producer is built here.
    Producer<byte[], byte[]> threadProducer = null;
    final boolean eosEnabled = StreamsConfig.EXACTLY_ONCE.equals(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG));
    if (!eosEnabled) {
        final Map<String, Object> producerConfigs = config.getProducerConfigs(threadClientId);
        log.info("Creating shared producer client");
        threadProducer = clientSupplier.getProducer(producerConfigs);
    }

    final StreamsMetricsThreadImpl streamsMetrics = new StreamsMetricsThreadImpl(metrics, "stream-metrics", "thread." + threadClientId, Collections.singletonMap("client-id", threadClientId));
    final ThreadCache cache = new ThreadCache(logContext, cacheSizeBytes, streamsMetrics);
    final AbstractTaskCreator<StreamTask> activeTaskCreator = new TaskCreator(builder, config, streamsMetrics, stateDirectory, streamsMetrics.taskCreatedSensor, changelogReader, cache, time, clientSupplier, threadProducer, threadClientId, log);
    final AbstractTaskCreator<StandbyTask> standbyTaskCreator = new StandbyTaskCreator(builder, config, streamsMetrics, stateDirectory, streamsMetrics.taskCreatedSensor, changelogReader, time, log);
    final TaskManager taskManager = new TaskManager(changelogReader, processId, logPrefix, restoreConsumer, streamsMetadataState, activeTaskCreator, standbyTaskCreator, adminClient, new AssignedStreamsTasks(logContext), new AssignedStandbyTasks(logContext));

    log.info("Creating consumer client");
    final String applicationId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG);
    final Map<String, Object> consumerConfigs = config.getConsumerConfigs(applicationId, threadClientId);
    consumerConfigs.put(StreamsConfig.InternalConfig.TASK_MANAGER_FOR_PARTITION_ASSIGNOR, taskManager);

    // If any source topic declares a per-topic reset policy, force the global policy to "none"
    // and remember the original default so the thread can apply it itself.
    String originalReset = null;
    if (!builder.latestResetTopicsPattern().pattern().equals("") || !builder.earliestResetTopicsPattern().pattern().equals("")) {
        originalReset = (String) consumerConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
        consumerConfigs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
    }
    final Consumer<byte[], byte[]> consumer = clientSupplier.getConsumer(consumerConfigs);
    taskManager.setConsumer(consumer);

    return new StreamThread(time, config, restoreConsumer, consumer, originalReset, taskManager, streamsMetrics, builder, threadClientId, logContext);
}
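The logPrefix built above is what makes every log line traceable to a single stream thread. A minimal sketch of the same pattern outside Streams, assuming slf4j on the classpath (class and client-id names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

public class PrefixedThreadLogging {
    private static final AtomicInteger THREAD_ID_SEQUENCE = new AtomicInteger(1);

    static Logger loggerFor(String clientId) {
        // Mirrors StreamThread.create: bake a per-thread id into the prefix once,
        // then every log call carries it automatically.
        String threadClientId = clientId + "-StreamThread-" + THREAD_ID_SEQUENCE.getAndIncrement();
        return new LogContext(String.format("stream-thread [%s] ", threadClientId)).logger(PrefixedThreadLogging.class);
    }

    public static void main(String[] args) {
        loggerFor("my-app").info("Creating restore consumer client");
        // logged as "stream-thread [my-app-StreamThread-1] Creating restore consumer client"
    }
}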
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class KafkaConsumerTest, method newConsumer.
private KafkaConsumer<String, String> newConsumer(Time time,
                                                  KafkaClient client,
                                                  Metadata metadata,
                                                  PartitionAssignor assignor,
                                                  OffsetResetStrategy resetStrategy,
                                                  boolean autoCommitEnabled) {
    String clientId = "mock-consumer";
    String groupId = "mock-group";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    long requestTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    int rebalanceTimeoutMs = 60000;

    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    List<PartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.<ConsumerInterceptor<String, String>>emptyList());
    Metrics metrics = new Metrics();
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
    SubscriptionState subscriptions = new SubscriptionState(resetStrategy);

    // One LogContext is shared by the network client, coordinator, fetcher, and consumer,
    // so all of their log lines carry the same (here: empty) prefix.
    LogContext loggerFactory = new LogContext();

    // sessionTimeoutMs, heartbeatIntervalMs, and autoCommitIntervalMs are fields of the enclosing test class.
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    ConsumerCoordinator consumerCoordinator = new ConsumerCoordinator(loggerFactory, consumerClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, assignors, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs, autoCommitEnabled, autoCommitIntervalMs, interceptors, excludeInternalTopics, true);
    Fetcher<String, String> fetcher = new Fetcher<>(loggerFactory, consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, keyDeserializer, valueDeserializer, metadata, subscriptions, metrics, metricsRegistry.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED);

    return new KafkaConsumer<>(loggerFactory, clientId, consumerCoordinator, keyDeserializer, valueDeserializer, fetcher, interceptors, time, consumerClient, metrics, subscriptions, metadata, retryBackoffMs, requestTimeoutMs, assignors);
}
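The test deliberately uses an unprefixed LogContext; the production KafkaConsumer constructor instead builds one from the client and group ids. A sketch of that pattern (the exact prefix format is recalled from the Kafka sources and may differ by version):

import org.apache.kafka.common.utils.LogContext;

public final class ConsumerLogContexts {
    static LogContext forConsumer(String clientId, String groupId) {
        // One shared context means every internal component of the consumer logs under the same prefix.
        return new LogContext("[Consumer clientId=" + clientId + ", groupId=" + groupId + "] ");
    }
}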
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class SegmentIteratorTest, method before.
@Before
public void before() {
    context = new InternalMockProcessorContext(
        TestUtils.tempDirectory(),
        Serdes.String(),
        Serdes.String(),
        new NoOpRecordCollector(),
        // A zero-byte ThreadCache evicts entries immediately, so the test runs effectively uncached.
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())));
    segmentOne.openDB(context);
    segmentTwo.openDB(context);
    segmentOne.put(Bytes.wrap("a".getBytes()), "1".getBytes());
    segmentOne.put(Bytes.wrap("b".getBytes()), "2".getBytes());
    segmentTwo.put(Bytes.wrap("c".getBytes()), "3".getBytes());
    segmentTwo.put(Bytes.wrap("d".getBytes()), "4".getBytes());
}
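Note that the prefix "testCache " ends with a trailing space on purpose: LogContext prepends the prefix to each message verbatim, so without it the prefix would run straight into the log text.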
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class MeteredWindowStoreTest, method setUp.
@Before
public void setUp() throws Exception {
    final Metrics metrics = new Metrics();
    final StreamsMetrics streamsMetrics = new StreamsMetrics() {
        @Override
        public Map<MetricName, ? extends Metric> metrics() {
            return Collections.unmodifiableMap(metrics.metrics());
        }

        @Override
        public Sensor addLatencyAndThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordLevel, String... tags) {
            return metrics.sensor(operationName);
        }

        @Override
        public void recordLatency(final Sensor sensor, final long startNs, final long endNs) {
            // Capture only the sensor name so the test can later assert which operations were measured.
            latencyRecorded.add(sensor.name());
        }

        @Override
        public Sensor addThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordLevel, String... tags) {
            return metrics.sensor(operationName);
        }

        @Override
        public void recordThroughput(Sensor sensor, long value) {
            throughputRecorded.add(sensor.name());
        }

        @Override
        public void removeSensor(Sensor sensor) {
            metrics.removeSensor(sensor.name());
        }

        @Override
        public Sensor addSensor(String name, Sensor.RecordingLevel recordLevel) {
            return metrics.sensor(name);
        }

        @Override
        public Sensor addSensor(String name, Sensor.RecordingLevel recordLevel, Sensor... parents) {
            return metrics.sensor(name);
        }
    };
    context = new InternalMockProcessorContext(TestUtils.tempDirectory(), Serdes.String(), Serdes.Long(), new NoOpRecordCollector(), new ThreadCache(new LogContext("testCache "), 0, streamsMetrics)) {
        @Override
        public StreamsMetrics metrics() {
            return streamsMetrics;
        }
    };
    EasyMock.expect(innerStoreMock.name()).andReturn("store").anyTimes();
}
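Because recordLatency and recordThroughput only append the sensor's name to latencyRecorded and throughputRecorded, the test can assert exactly which store operations were measured without standing up a real metrics reporter.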