Example 31 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class ConsumerNetworkClientTest, method sendExpiry:

@Test
public void sendExpiry() throws InterruptedException {
    long unsentExpiryMs = 10;
    final AtomicBoolean isReady = new AtomicBoolean();
    final AtomicBoolean disconnected = new AtomicBoolean();
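    // MockClient stub whose readiness and connection state are driven by the two flags above.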
    client = new MockClient(time) {

        @Override
        public boolean ready(Node node, long now) {
            if (isReady.get())
                return super.ready(node, now);
            else
                return false;
        }

        @Override
        public boolean connectionFailed(Node node) {
            return disconnected.get();
        }
    };
    // Queue first send, sleep long enough for this to expire and then queue second send
    consumerClient = new ConsumerNetworkClient(new LogContext(), client, metadata, time, 100, unsentExpiryMs, Integer.MAX_VALUE);
    RequestFuture<ClientResponse> future1 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future1.isDone());
    time.sleep(unsentExpiryMs + 1);
    RequestFuture<ClientResponse> future2 = consumerClient.send(node, heartbeat());
    assertEquals(2, consumerClient.pendingRequestCount());
    assertEquals(2, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());
    // First send should have expired and second send still pending
    consumerClient.poll(0);
    assertTrue(future1.isDone());
    assertFalse(future1.succeeded());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    assertFalse(future2.isDone());
    // Enable send; the un-expired request should succeed on poll
    isReady.set(true);
    client.prepareResponse(heartbeatResponse(Errors.NONE));
    consumerClient.poll(future2);
    ClientResponse clientResponse = future2.value();
    HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody();
    assertEquals(Errors.NONE, response.error());
    // Disable ready flag to delay send and queue another send. Disconnection should remove pending send
    isReady.set(false);
    RequestFuture<ClientResponse> future3 = consumerClient.send(node, heartbeat());
    assertEquals(1, consumerClient.pendingRequestCount());
    assertEquals(1, consumerClient.pendingRequestCount(node));
    disconnected.set(true);
    consumerClient.poll(0);
    assertTrue(future3.isDone());
    assertFalse(future3.succeeded());
    assertEquals(0, consumerClient.pendingRequestCount());
    assertEquals(0, consumerClient.pendingRequestCount(node));
}
Also used: ClientResponse(org.apache.kafka.clients.ClientResponse), AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean), HeartbeatResponse(org.apache.kafka.common.requests.HeartbeatResponse), Node(org.apache.kafka.common.Node), LogContext(org.apache.kafka.common.utils.LogContext), MockClient(org.apache.kafka.clients.MockClient), Test(org.junit.Test)

Example 32 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class StreamThread, method create:

public static StreamThread create(final InternalTopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final AdminClient adminClient, final UUID processId, final String clientId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes, final StateDirectory stateDirectory, final StateRestoreListener userStateRestoreListener) {
    final String threadClientId = clientId + "-StreamThread-" + STREAM_THREAD_ID_SEQUENCE.getAndIncrement();
    final String logPrefix = String.format("stream-thread [%s] ", threadClientId);
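    // Every message logged through this LogContext is prefixed with "stream-thread [<threadClientId>] ".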
    final LogContext logContext = new LogContext(logPrefix);
    final Logger log = logContext.logger(StreamThread.class);
    log.info("Creating restore consumer client");
    final Map<String, Object> restoreConsumerConfigs = config.getRestoreConsumerConfigs(threadClientId);
    final Consumer<byte[], byte[]> restoreConsumer = clientSupplier.getRestoreConsumer(restoreConsumerConfigs);
    final StoreChangelogReader changelogReader = new StoreChangelogReader(restoreConsumer, userStateRestoreListener, logContext);
    Producer<byte[], byte[]> threadProducer = null;
    final boolean eosEnabled = StreamsConfig.EXACTLY_ONCE.equals(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG));
    if (!eosEnabled) {
        final Map<String, Object> producerConfigs = config.getProducerConfigs(threadClientId);
        log.info("Creating shared producer client");
        threadProducer = clientSupplier.getProducer(producerConfigs);
    }
    StreamsMetricsThreadImpl streamsMetrics = new StreamsMetricsThreadImpl(metrics, "stream-metrics", "thread." + threadClientId, Collections.singletonMap("client-id", threadClientId));
    final ThreadCache cache = new ThreadCache(logContext, cacheSizeBytes, streamsMetrics);
    final AbstractTaskCreator<StreamTask> activeTaskCreator = new TaskCreator(builder, config, streamsMetrics, stateDirectory, streamsMetrics.taskCreatedSensor, changelogReader, cache, time, clientSupplier, threadProducer, threadClientId, log);
    final AbstractTaskCreator<StandbyTask> standbyTaskCreator = new StandbyTaskCreator(builder, config, streamsMetrics, stateDirectory, streamsMetrics.taskCreatedSensor, changelogReader, time, log);
    TaskManager taskManager = new TaskManager(changelogReader, processId, logPrefix, restoreConsumer, streamsMetadataState, activeTaskCreator, standbyTaskCreator, adminClient, new AssignedStreamsTasks(logContext), new AssignedStandbyTasks(logContext));
    log.info("Creating consumer client");
    final String applicationId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG);
    final Map<String, Object> consumerConfigs = config.getConsumerConfigs(applicationId, threadClientId);
    consumerConfigs.put(StreamsConfig.InternalConfig.TASK_MANAGER_FOR_PARTITION_ASSIGNOR, taskManager);
    String originalReset = null;
    if (!builder.latestResetTopicsPattern().pattern().equals("") || !builder.earliestResetTopicsPattern().pattern().equals("")) {
        originalReset = (String) consumerConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
        consumerConfigs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
    }
    final Consumer<byte[], byte[]> consumer = clientSupplier.getConsumer(consumerConfigs);
    taskManager.setConsumer(consumer);
    return new StreamThread(time, config, restoreConsumer, consumer, originalReset, taskManager, streamsMetrics, builder, threadClientId, logContext);
}
Also used: LogContext(org.apache.kafka.common.utils.LogContext), Logger(org.slf4j.Logger), ThreadCache(org.apache.kafka.streams.state.internals.ThreadCache)

Example 33 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class KafkaConsumerTest, method newConsumer:

private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor, OffsetResetStrategy resetStrategy, boolean autoCommitEnabled) {
    String clientId = "mock-consumer";
    String groupId = "mock-group";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    long requestTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    int rebalanceTimeoutMs = 60000;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    List<PartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.<ConsumerInterceptor<String, String>>emptyList());
    Metrics metrics = new Metrics();
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
    SubscriptionState subscriptions = new SubscriptionState(resetStrategy);
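    // One LogContext (here without a prefix) is shared by the network client, coordinator, fetcher and consumer.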
    LogContext loggerFactory = new LogContext();
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    ConsumerCoordinator consumerCoordinator = new ConsumerCoordinator(loggerFactory, consumerClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, assignors, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs, autoCommitEnabled, autoCommitIntervalMs, interceptors, excludeInternalTopics, true);
    Fetcher<String, String> fetcher = new Fetcher<>(loggerFactory, consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, keyDeserializer, valueDeserializer, metadata, subscriptions, metrics, metricsRegistry.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED);
    return new KafkaConsumer<>(loggerFactory, clientId, consumerCoordinator, keyDeserializer, valueDeserializer, fetcher, interceptors, time, consumerClient, metrics, subscriptions, metadata, retryBackoffMs, requestTimeoutMs, assignors);
}
Also used: ConsumerInterceptors(org.apache.kafka.clients.consumer.internals.ConsumerInterceptors), StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer), ConsumerCoordinator(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator), LogContext(org.apache.kafka.common.utils.LogContext), ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics), Metrics(org.apache.kafka.common.metrics.Metrics), ConsumerNetworkClient(org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient), SubscriptionState(org.apache.kafka.clients.consumer.internals.SubscriptionState), Fetcher(org.apache.kafka.clients.consumer.internals.Fetcher), PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor)

Example 34 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class SegmentIteratorTest, method before:

@Before
public void before() {
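    // The ThreadCache below logs through a LogContext, so its messages carry the "testCache " prefix.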
    context = new InternalMockProcessorContext(TestUtils.tempDirectory(), Serdes.String(), Serdes.String(), new NoOpRecordCollector(), new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())));
    segmentOne.openDB(context);
    segmentTwo.openDB(context);
    segmentOne.put(Bytes.wrap("a".getBytes()), "1".getBytes());
    segmentOne.put(Bytes.wrap("b".getBytes()), "2".getBytes());
    segmentTwo.put(Bytes.wrap("c".getBytes()), "3".getBytes());
    segmentTwo.put(Bytes.wrap("d".getBytes()), "4".getBytes());
}
Also used: MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics), Metrics(org.apache.kafka.common.metrics.Metrics), NoOpRecordCollector(org.apache.kafka.test.NoOpRecordCollector), LogContext(org.apache.kafka.common.utils.LogContext), InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext), Before(org.junit.Before)

Example 35 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class MeteredWindowStoreTest, method setUp:

@Before
public void setUp() throws Exception {
    final Metrics metrics = new Metrics();
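    // Hand-rolled StreamsMetrics stub: it records the names of the sensors used for latency and throughput so the test can assert on them.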
    final StreamsMetrics streamsMetrics = new StreamsMetrics() {

        @Override
        public Map<MetricName, ? extends Metric> metrics() {
            return Collections.unmodifiableMap(metrics.metrics());
        }

        @Override
        public Sensor addLatencyAndThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordLevel, String... tags) {
            return metrics.sensor(operationName);
        }

        @Override
        public void recordLatency(final Sensor sensor, final long startNs, final long endNs) {
            latencyRecorded.add(sensor.name());
        }

        @Override
        public Sensor addThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordLevel, String... tags) {
            return metrics.sensor(operationName);
        }

        @Override
        public void recordThroughput(Sensor sensor, long value) {
            throughputRecorded.add(sensor.name());
        }

        @Override
        public void removeSensor(Sensor sensor) {
            metrics.removeSensor(sensor.name());
        }

        @Override
        public Sensor addSensor(String name, Sensor.RecordingLevel recordLevel) {
            return metrics.sensor(name);
        }

        @Override
        public Sensor addSensor(String name, Sensor.RecordingLevel recordLevel, Sensor... parents) {
            return metrics.sensor(name);
        }
    };
    context = new InternalMockProcessorContext(TestUtils.tempDirectory(), Serdes.String(), Serdes.Long(), new NoOpRecordCollector(), new ThreadCache(new LogContext("testCache "), 0, streamsMetrics)) {

        @Override
        public StreamsMetrics metrics() {
            return streamsMetrics;
        }
    };
    EasyMock.expect(innerStoreMock.name()).andReturn("store").anyTimes();
}
Also used: MetricName(org.apache.kafka.common.MetricName), Metrics(org.apache.kafka.common.metrics.Metrics), StreamsMetrics(org.apache.kafka.streams.StreamsMetrics), NoOpRecordCollector(org.apache.kafka.test.NoOpRecordCollector), LogContext(org.apache.kafka.common.utils.LogContext), InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext), Sensor(org.apache.kafka.common.metrics.Sensor), Before(org.junit.Before)

Aggregations

LogContext (org.apache.kafka.common.utils.LogContext): 53
Metrics (org.apache.kafka.common.metrics.Metrics): 28
Test (org.junit.Test): 27
Before (org.junit.Before): 17
InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext): 14
MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics): 13
NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector): 10
HashMap (java.util.HashMap): 8
MockTime (org.apache.kafka.common.utils.MockTime): 8
InetSocketAddress (java.net.InetSocketAddress): 7
Properties (java.util.Properties): 7
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 7
TopicPartition (org.apache.kafka.common.TopicPartition): 6
File (java.io.File): 5
NetworkClient (org.apache.kafka.clients.NetworkClient): 5
InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest): 5
ServerSocketChannel (java.nio.channels.ServerSocketChannel): 4
SocketChannel (java.nio.channels.SocketChannel): 4
Metadata (org.apache.kafka.clients.Metadata): 4
MockClient (org.apache.kafka.clients.MockClient): 4
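
All of the examples above reduce to the same two-step pattern: construct a LogContext, optionally with a message prefix, and derive per-class slf4j loggers from it. A minimal, self-contained sketch of that pattern (the class name and the "[worker-1] " prefix are illustrative, not taken from any of the examples):

import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

public class LogContextSketch {

    public static void main(String[] args) {
        // The prefix is prepended to every message logged through this context,
        // which makes it easy to attribute log lines to one component or thread.
        LogContext logContext = new LogContext("[worker-1] ");
        Logger log = logContext.logger(LogContextSketch.class);
        // Rendered as "[worker-1] starting up" by the underlying slf4j logger.
        log.info("starting up");
    }
}

The no-argument constructor seen in Example 33 works the same way but adds no prefix.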