
Example 36 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class StreamsPartitionAssignor, method configure.

/**
 * We need the PartitionAssignor and its StreamThread to be mutually accessible,
 * since the former needs the latter's cached metadata when sending subscriptions,
 * and the latter needs the former's returned assignment when adding tasks.
 * @throws KafkaException if the task manager is not specified
 */
@Override
public void configure(final Map<String, ?> configs) {
    final StreamsConfig streamsConfig = new StreamsConfig(configs);
    // Set up the logger using the passed-in client id
    logPrefix = String.format("stream-thread [%s] ", streamsConfig.getString(CommonClientConfigs.CLIENT_ID_CONFIG));
    final LogContext logContext = new LogContext(logPrefix);
    log = logContext.logger(getClass());
    final Object o = configs.get(StreamsConfig.InternalConfig.TASK_MANAGER_FOR_PARTITION_ASSIGNOR);
    if (o == null) {
        final KafkaException fatalException = new KafkaException("TaskManager is not specified");
        log.error(fatalException.getMessage(), fatalException);
        throw fatalException;
    }
    if (!(o instanceof TaskManager)) {
        final KafkaException fatalException = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), TaskManager.class.getName()));
        log.error(fatalException.getMessage(), fatalException);
        throw fatalException;
    }
    taskManager = (TaskManager) o;
    numStandbyReplicas = streamsConfig.getInt(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG);
    partitionGrouper = streamsConfig.getConfiguredInstance(StreamsConfig.PARTITION_GROUPER_CLASS_CONFIG, PartitionGrouper.class);
    final String userEndPoint = streamsConfig.getString(StreamsConfig.APPLICATION_SERVER_CONFIG);
    if (userEndPoint != null && !userEndPoint.isEmpty()) {
        try {
            final String host = getHost(userEndPoint);
            final Integer port = getPort(userEndPoint);
            if (host == null || port == null)
                throw new ConfigException(String.format("%s Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", logPrefix, StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint));
        } catch (final NumberFormatException nfe) {
            throw new ConfigException(String.format("%s Invalid port supplied in %s for config %s", logPrefix, userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG));
        }
        this.userEndPoint = userEndPoint;
    }
    internalTopicManager = new InternalTopicManager(taskManager.adminClient, streamsConfig);
    copartitionedTopicsValidator = new CopartitionedTopicsValidator(logPrefix);
}
Also used: LogContext (org.apache.kafka.common.utils.LogContext), PartitionGrouper (org.apache.kafka.streams.processor.PartitionGrouper), KafkaException (org.apache.kafka.common.KafkaException), ConfigException (org.apache.kafka.common.config.ConfigException), StreamsConfig (org.apache.kafka.streams.StreamsConfig)
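
The pattern above (one LogContext per stream thread, with the client id baked into the prefix) means every logger created from that context tags its output automatically. A minimal sketch of the effect, using only the LogContext calls seen in the example; the prefix string and class name here are illustrative:

import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

public class LogContextPrefixSketch {

    public static void main(String[] args) {
        // Every logger minted from this context prepends the prefix, so log
        // lines from different stream threads remain distinguishable.
        final LogContext logContext = new LogContext("stream-thread [demo-client] ");
        final Logger log = logContext.logger(LogContextPrefixSketch.class);
        // Logs something like: "stream-thread [demo-client] configured 1 standby replica(s)"
        log.info("configured {} standby replica(s)", 1);
    }
}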

Example 37 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class WorkerCoordinatorTest, method setup.

@Before
public void setup() {
    LogContext loggerFactory = new LogContext();
    this.time = new MockTime();
    this.client = new MockClient(time);
    this.metadata = new Metadata(0, Long.MAX_VALUE, true);
    this.metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    this.consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, 100, 1000, heartbeatIntervalMs);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();
    this.configStorage = PowerMock.createMock(KafkaConfigBackingStore.class);
    client.setNode(node);
    this.coordinator = new WorkerCoordinator(loggerFactory, consumerClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, metrics, "consumer" + groupId, time, retryBackoffMs, LEADER_URL, configStorage, rebalanceListener);
    configState1 = new ClusterConfigState(1L, Collections.singletonMap(connectorId1, 1), Collections.singletonMap(connectorId1, (Map<String, String>) new HashMap<String, String>()), Collections.singletonMap(connectorId1, TargetState.STARTED), Collections.singletonMap(taskId1x0, (Map<String, String>) new HashMap<String, String>()), Collections.<String>emptySet());
    Map<String, Integer> configState2ConnectorTaskCounts = new HashMap<>();
    configState2ConnectorTaskCounts.put(connectorId1, 2);
    configState2ConnectorTaskCounts.put(connectorId2, 1);
    Map<String, Map<String, String>> configState2ConnectorConfigs = new HashMap<>();
    configState2ConnectorConfigs.put(connectorId1, new HashMap<String, String>());
    configState2ConnectorConfigs.put(connectorId2, new HashMap<String, String>());
    Map<String, TargetState> configState2TargetStates = new HashMap<>();
    configState2TargetStates.put(connectorId1, TargetState.STARTED);
    configState2TargetStates.put(connectorId2, TargetState.STARTED);
    Map<ConnectorTaskId, Map<String, String>> configState2TaskConfigs = new HashMap<>();
    configState2TaskConfigs.put(taskId1x0, new HashMap<String, String>());
    configState2TaskConfigs.put(taskId1x1, new HashMap<String, String>());
    configState2TaskConfigs.put(taskId2x0, new HashMap<String, String>());
    configState2 = new ClusterConfigState(2L, configState2ConnectorTaskCounts, configState2ConnectorConfigs, configState2TargetStates, configState2TaskConfigs, Collections.<String>emptySet());
    Map<String, Integer> configStateSingleTaskConnectorsConnectorTaskCounts = new HashMap<>();
    configStateSingleTaskConnectorsConnectorTaskCounts.put(connectorId1, 1);
    configStateSingleTaskConnectorsConnectorTaskCounts.put(connectorId2, 1);
    configStateSingleTaskConnectorsConnectorTaskCounts.put(connectorId3, 1);
    Map<String, Map<String, String>> configStateSingleTaskConnectorsConnectorConfigs = new HashMap<>();
    configStateSingleTaskConnectorsConnectorConfigs.put(connectorId1, new HashMap<String, String>());
    configStateSingleTaskConnectorsConnectorConfigs.put(connectorId2, new HashMap<String, String>());
    configStateSingleTaskConnectorsConnectorConfigs.put(connectorId3, new HashMap<String, String>());
    Map<String, TargetState> configStateSingleTaskConnectorsTargetStates = new HashMap<>();
    configStateSingleTaskConnectorsTargetStates.put(connectorId1, TargetState.STARTED);
    configStateSingleTaskConnectorsTargetStates.put(connectorId2, TargetState.STARTED);
    configStateSingleTaskConnectorsTargetStates.put(connectorId3, TargetState.STARTED);
    Map<ConnectorTaskId, Map<String, String>> configStateSingleTaskConnectorsTaskConfigs = new HashMap<>();
    configStateSingleTaskConnectorsTaskConfigs.put(taskId1x0, new HashMap<String, String>());
    configStateSingleTaskConnectorsTaskConfigs.put(taskId2x0, new HashMap<String, String>());
    configStateSingleTaskConnectorsTaskConfigs.put(taskId3x0, new HashMap<String, String>());
    configStateSingleTaskConnectors = new ClusterConfigState(2L, configStateSingleTaskConnectorsConnectorTaskCounts, configStateSingleTaskConnectorsConnectorConfigs, configStateSingleTaskConnectorsTargetStates, configStateSingleTaskConnectorsTaskConfigs, Collections.<String>emptySet());
}
Also used: TargetState (org.apache.kafka.connect.runtime.TargetState), ConnectorTaskId (org.apache.kafka.connect.util.ConnectorTaskId), HashMap (java.util.HashMap), ProtocolMetadata (org.apache.kafka.common.requests.JoinGroupRequest.ProtocolMetadata), Metadata (org.apache.kafka.clients.Metadata), LogContext (org.apache.kafka.common.utils.LogContext), KafkaConfigBackingStore (org.apache.kafka.connect.storage.KafkaConfigBackingStore), Metrics (org.apache.kafka.common.metrics.Metrics), ConsumerNetworkClient (org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient), Map (java.util.Map), MockTime (org.apache.kafka.common.utils.MockTime), MockClient (org.apache.kafka.clients.MockClient), Before (org.junit.Before)
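
Note that this test builds its LogContext with the no-arg constructor, so the loggers it hands out carry no prefix at all. A short sketch of the contrast between the two constructors; the prefixed string below is an illustrative assumption, not taken from the example:

import org.apache.kafka.common.utils.LogContext;

public class LogContextDefaultsSketch {

    public static void main(String[] args) {
        // No-arg constructor: no prefix, loggers behave like plain SLF4J loggers.
        final LogContext plain = new LogContext();
        plain.logger(LogContextDefaultsSketch.class).info("unprefixed line");

        // Prefixed context: every line is tagged, which matters once several
        // coordinators or threads share one log file. (Prefix text is hypothetical.)
        final LogContext tagged = new LogContext("[WorkerCoordinator groupId=test-group] ");
        tagged.logger(LogContextDefaultsSketch.class).info("prefixed line");
    }
}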

Example 38 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class KafkaAdminClient, method createInternal.

static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcessorFactory timeoutProcessorFactory) {
    Metrics metrics = null;
    NetworkClient networkClient = null;
    Time time = Time.SYSTEM;
    String clientId = generateClientId(config);
    ChannelBuilder channelBuilder = null;
    Selector selector = null;
    ApiVersions apiVersions = new ApiVersions();
    LogContext logContext = createLogContext(clientId);
    try {
        // Since we only request node information, it's safe to pass true for allowAutoTopicCreation (and it
        // simplifies communication with older brokers)
        Metadata metadata = new Metadata(config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG), config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG), true);
        List<MetricsReporter> reporters = config.getConfiguredInstances(AdminClientConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class);
        Map<String, String> metricTags = Collections.singletonMap("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(AdminClientConfig.METRICS_NUM_SAMPLES_CONFIG)).timeWindow(config.getLong(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS).recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG))).tags(metricTags);
        reporters.add(new JmxReporter(JMX_PREFIX));
        metrics = new Metrics(metricConfig, reporters, time);
        String metricGrpPrefix = "admin-client";
        channelBuilder = ClientUtils.createChannelBuilder(config);
        selector = new Selector(config.getLong(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder, logContext);
        networkClient = new NetworkClient(selector, metadata, clientId, 1, config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG), config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG), config.getInt(AdminClientConfig.SEND_BUFFER_CONFIG), config.getInt(AdminClientConfig.RECEIVE_BUFFER_CONFIG), (int) TimeUnit.HOURS.toMillis(1), time, true, apiVersions, logContext);
        return new KafkaAdminClient(config, clientId, time, metadata, metrics, networkClient, timeoutProcessorFactory, logContext);
    } catch (Throwable exc) {
        closeQuietly(metrics, "Metrics");
        closeQuietly(networkClient, "NetworkClient");
        closeQuietly(selector, "Selector");
        closeQuietly(channelBuilder, "ChannelBuilder");
        throw new KafkaException("Failed create new KafkaAdminClient", exc);
}
Also used: MetricConfig (org.apache.kafka.common.metrics.MetricConfig), Metadata (org.apache.kafka.clients.Metadata), LogContext (org.apache.kafka.common.utils.LogContext), Time (org.apache.kafka.common.utils.Time), JmxReporter (org.apache.kafka.common.metrics.JmxReporter), Metrics (org.apache.kafka.common.metrics.Metrics), NetworkClient (org.apache.kafka.clients.NetworkClient), MetricsReporter (org.apache.kafka.common.metrics.MetricsReporter), ApiVersions (org.apache.kafka.clients.ApiVersions), KafkaException (org.apache.kafka.common.KafkaException), ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder), Selector (org.apache.kafka.common.network.Selector)
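
The try/catch shape here is worth noting: every resource is declared (as null) before the try, so the catch block can close whatever was successfully constructed before rethrowing. A self-contained sketch of the same idiom, with a hypothetical Resource class standing in for Metrics, Selector, and NetworkClient:

import java.io.Closeable;

public class CleanupOnFailureSketch {

    // Hypothetical stand-in for Metrics, Selector, NetworkClient, etc.
    static final class Resource implements Closeable {
        private final String name;
        Resource(String name) { this.name = name; }
        @Override
        public void close() { System.out.println("closed " + name); }
    }

    static Resource create(boolean failMidway) {
        Resource metrics = null;
        Resource selector = null;
        try {
            metrics = new Resource("metrics");
            if (failMidway) {
                throw new IllegalStateException("selector construction failed");
            }
            selector = new Resource("selector");
            return selector;
        } catch (Throwable t) {
            // Close whatever was already built; nulls are skipped.
            closeQuietly(selector, "selector");
            closeQuietly(metrics, "metrics");
            throw new RuntimeException("Failed to create resource", t);
        }
    }

    static void closeQuietly(Closeable closeable, String name) {
        if (closeable == null) {
            return;
        }
        try {
            closeable.close();
        } catch (Exception e) {
            System.err.println("ignoring error while closing " + name);
        }
    }

    public static void main(String[] args) {
        try {
            create(true);
        } catch (RuntimeException expected) {
            // "metrics" has already been closed by the catch block above.
            System.out.println("caught: " + expected.getMessage());
        }
    }
}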

Example 39 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class StreamThreadTest, method shouldShutdownTaskManagerOnClose.

@SuppressWarnings("unchecked")
@Test
public void shouldShutdownTaskManagerOnClose() {
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    EasyMock.expect(taskManager.activeTasks()).andReturn(Collections.<TaskId, StreamTask>emptyMap());
    EasyMock.expect(taskManager.standbyTasks()).andReturn(Collections.<TaskId, StandbyTask>emptyMap());
    taskManager.shutdown(true);
    EasyMock.expectLastCall();
    EasyMock.replay(taskManager, consumer);
    final StreamThread.StreamsMetricsThreadImpl streamsMetrics = new StreamThread.StreamsMetricsThreadImpl(metrics, "", "", Collections.<String, String>emptyMap());
    final StreamThread thread = new StreamThread(mockTime, config, consumer, consumer, null, taskManager, streamsMetrics, internalTopologyBuilder, clientId, new LogContext(""));
    thread.setStateListener(new StreamThread.StateListener() {

        @Override
        public void onChange(final Thread t, final ThreadStateTransitionValidator newState, final ThreadStateTransitionValidator oldState) {
            if (oldState == StreamThread.State.CREATED && newState == StreamThread.State.RUNNING) {
                thread.shutdown();
            }
        }
    });
    thread.run();
    EasyMock.verify(taskManager);
}
Also used: LogContext (org.apache.kafka.common.utils.LogContext), InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest), Test (org.junit.Test)
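
The test's core assertion is EasyMock.verify(taskManager): it fails unless shutdown(true) was actually invoked while thread.run() executed. A reduced sketch of that record/replay/verify cycle, using a hypothetical TaskManagerLike interface in place of the real TaskManager:

import org.easymock.EasyMock;

public class VerifyShutdownSketch {

    // Hypothetical stand-in for the real TaskManager dependency.
    interface TaskManagerLike {
        void shutdown(boolean clean);
    }

    public static void main(String[] args) {
        final TaskManagerLike taskManager = EasyMock.createNiceMock(TaskManagerLike.class);
        // Record phase: declare the call we expect the code under test to make.
        taskManager.shutdown(true);
        EasyMock.expectLastCall();
        EasyMock.replay(taskManager);

        // Replay phase: the real test runs the StreamThread here; the sketch
        // just invokes the dependency directly.
        taskManager.shutdown(true);

        // Verify phase: throws if shutdown(true) was never called.
        EasyMock.verify(taskManager);
        System.out.println("shutdown(true) was invoked as expected");
    }
}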

Example 40 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class CachingSessionStoreTest, method setUp.

@Before
public void setUp() {
    final SessionKeySchema schema = new SessionKeySchema();
    schema.init("topic");
    final int retention = 60000;
    final int numSegments = 3;
    underlying = new RocksDBSegmentedBytesStore("test", retention, numSegments, schema);
    final RocksDBSessionStore<Bytes, byte[]> sessionStore = new RocksDBSessionStore<>(underlying, Serdes.Bytes(), Serdes.ByteArray());
    cachingStore = new CachingSessionStore<>(sessionStore, Serdes.String(), Serdes.String(), Segments.segmentInterval(retention, numSegments));
    cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    context = new InternalMockProcessorContext(TestUtils.tempDirectory(), null, null, null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, "topic"));
    cachingStore.init(context, cachingStore);
}
Also used: LogContext (org.apache.kafka.common.utils.LogContext), MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics), Bytes (org.apache.kafka.common.utils.Bytes), Metrics (org.apache.kafka.common.metrics.Metrics), ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext), InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext), Before (org.junit.Before)
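
Structurally, the fixture wraps a RocksDB-backed session store in a caching layer that answers reads from memory first. A minimal sketch of that decorator shape; the KeyValueStoreLike interface is a hypothetical, much-reduced analogue of the real store interfaces, and the real CachingSessionStore additionally handles serdes, flushing, and eviction:

import java.util.HashMap;
import java.util.Map;

public class CachingStoreSketch {

    // Hypothetical stand-in for the store interface.
    interface KeyValueStoreLike {
        byte[] get(String key);
        void put(String key, byte[] value);
    }

    static final class CachingStore implements KeyValueStoreLike {
        private final KeyValueStoreLike inner; // e.g. the RocksDB-backed store
        private final Map<String, byte[]> cache = new HashMap<>();

        CachingStore(KeyValueStoreLike inner) {
            this.inner = inner;
        }

        @Override
        public byte[] get(String key) {
            final byte[] cached = cache.get(key);
            return cached != null ? cached : inner.get(key); // fall through on a miss
        }

        @Override
        public void put(String key, byte[] value) {
            cache.put(key, value); // dirty entries would later be flushed to inner
        }
    }

    public static void main(String[] args) {
        final Map<String, byte[]> backing = new HashMap<>();
        final KeyValueStoreLike store = new CachingStore(new KeyValueStoreLike() {
            @Override
            public byte[] get(String key) { return backing.get(key); }
            @Override
            public void put(String key, byte[] value) { backing.put(key, value); }
        });
        store.put("a", new byte[] { 1 });
        System.out.println(store.get("a").length); // served from the cache
    }
}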

Aggregations

LogContext (org.apache.kafka.common.utils.LogContext): 53
Metrics (org.apache.kafka.common.metrics.Metrics): 28
Test (org.junit.Test): 27
Before (org.junit.Before): 17
InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext): 14
MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics): 13
NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector): 10
HashMap (java.util.HashMap): 8
MockTime (org.apache.kafka.common.utils.MockTime): 8
InetSocketAddress (java.net.InetSocketAddress): 7
Properties (java.util.Properties): 7
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 7
TopicPartition (org.apache.kafka.common.TopicPartition): 6
File (java.io.File): 5
NetworkClient (org.apache.kafka.clients.NetworkClient): 5
InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest): 5
ServerSocketChannel (java.nio.channels.ServerSocketChannel): 4
SocketChannel (java.nio.channels.SocketChannel): 4
Metadata (org.apache.kafka.clients.Metadata): 4
MockClient (org.apache.kafka.clients.MockClient): 4