Search in sources:

Example 1 with GroupRebalanceConfig

Usage of org.apache.kafka.clients.GroupRebalanceConfig in the Apache Kafka project.

From the class AbstractCoordinatorTest, method setupCoordinator.

/**
 * Builds a fresh coordinator test fixture: mock time, mock network client,
 * consumer metadata plumbing, and the DummyCoordinator under test.
 *
 * @param retryBackoffMs     backoff used for both the metadata/network client and the rebalance config
 * @param rebalanceTimeoutMs rebalance timeout passed through to the GroupRebalanceConfig
 * @param groupInstanceId    static-membership id; {@code Optional.empty()} means a dynamic member
 */
private void setupCoordinator(int retryBackoffMs, int rebalanceTimeoutMs, Optional<String> groupInstanceId) {
    LogContext logContext = new LogContext();
    this.mockTime = new MockTime();
    // Metadata with a 1-hour expiry and an EARLIEST-reset subscription state.
    // NOTE(review): the two 'false' flags are presumably includeInternalTopics /
    // allowAutoTopicCreation-style toggles — confirm against the ConsumerMetadata constructor.
    ConsumerMetadata metadata = new ConsumerMetadata(retryBackoffMs, 60 * 60 * 1000L, false, false, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext, new ClusterResourceListeners());
    this.mockClient = new MockClient(mockTime, metadata);
    this.consumerClient = new ConsumerNetworkClient(logContext, mockClient, metadata, mockTime, retryBackoffMs, REQUEST_TIMEOUT_MS, HEARTBEAT_INTERVAL_MS);
    metrics = new Metrics(mockTime);
    // Seed the mock client with a single-broker cluster carrying no topics.
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, emptyMap()));
    this.node = metadata.fetch().nodes().get(0);
    // Give the coordinator a node id distinct from the broker's; Integer.MAX_VALUE - id
    // mirrors how the FindCoordinator path derives coordinator node ids — TODO confirm.
    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // Last argument presumably means leaveGroupOnClose: only dynamic members
    // (no group.instance.id) leave the group on close — TODO confirm parameter name.
    GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(SESSION_TIMEOUT_MS, rebalanceTimeoutMs, HEARTBEAT_INTERVAL_MS, GROUP_ID, groupInstanceId, retryBackoffMs, !groupInstanceId.isPresent());
    this.coordinator = new DummyCoordinator(rebalanceConfig, consumerClient, metrics, mockTime);
}
Also used : GroupRebalanceConfig(org.apache.kafka.clients.GroupRebalanceConfig) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Metrics(org.apache.kafka.common.metrics.Metrics) Node(org.apache.kafka.common.Node) LogContext(org.apache.kafka.common.utils.LogContext) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient)

Example 2 with GroupRebalanceConfig

Usage of org.apache.kafka.clients.GroupRebalanceConfig in the Apache Kafka project.

From the class WorkerCoordinatorTest, method setup.

@Before
public void setup() {
    LogContext logContext = new LogContext();
    this.time = new MockTime();
    this.metadata = new Metadata(0, Long.MAX_VALUE, logContext, new ClusterResourceListeners());
    this.client = new MockClient(time, metadata);
    // Single-broker cluster with one topic ("topic", 1 partition).
    this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)));
    this.node = metadata.fetch().nodes().get(0);
    this.consumerClient = new ConsumerNetworkClient(logContext, client, metadata, time, 100, 1000, heartbeatIntervalMs);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();
    this.configStorage = PowerMock.createMock(KafkaConfigBackingStore.class);
    // Dynamic member (no group.instance.id), leaveGroupOnClose = true.
    this.rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, Optional.empty(), retryBackoffMs, true);
    this.coordinator = new WorkerCoordinator(rebalanceConfig, logContext, consumerClient, metrics, "consumer" + groupId, time, LEADER_URL, configStorage, rebalanceListener, compatibility, 0);

    // Fixture 1: one connector with a single task, at config offset 1.
    configState1 = configStateWith(1L, Collections.singletonMap(connectorId1, 1), taskId1x0);

    // Fixture 2: connector1 with two tasks plus connector2 with one, at offset 2.
    Map<String, Integer> configState2TaskCounts = new HashMap<>();
    configState2TaskCounts.put(connectorId1, 2);
    configState2TaskCounts.put(connectorId2, 1);
    configState2 = configStateWith(2L, configState2TaskCounts, taskId1x0, taskId1x1, taskId2x0);

    // Fixture 3: three connectors with one task each, at offset 2.
    Map<String, Integer> singleTaskCounts = new HashMap<>();
    singleTaskCounts.put(connectorId1, 1);
    singleTaskCounts.put(connectorId2, 1);
    singleTaskCounts.put(connectorId3, 1);
    configStateSingleTaskConnectors = configStateWith(2L, singleTaskCounts, taskId1x0, taskId2x0, taskId3x0);
}

/**
 * Builds a ClusterConfigState in which every connector in {@code connectorTaskCounts}
 * is STARTED with an empty connector config, and every listed task has an empty task
 * config. Replaces the six near-identical map-population stanzas the original setup
 * repeated for each fixture. Assumes ClusterConfigState only reads the supplied maps,
 * so HashMap vs. singletonMap backing is interchangeable.
 *
 * @param configOffset        config-topic offset recorded in the state
 * @param connectorTaskCounts connector name -> task count
 * @param taskIds             every task id that should carry an (empty) task config
 */
private ClusterConfigState configStateWith(long configOffset, Map<String, Integer> connectorTaskCounts, ConnectorTaskId... taskIds) {
    Map<String, Map<String, String>> connectorConfigs = new HashMap<>();
    Map<String, TargetState> targetStates = new HashMap<>();
    for (String connectorId : connectorTaskCounts.keySet()) {
        connectorConfigs.put(connectorId, new HashMap<>());
        targetStates.put(connectorId, TargetState.STARTED);
    }
    Map<ConnectorTaskId, Map<String, String>> taskConfigs = new HashMap<>();
    for (ConnectorTaskId taskId : taskIds) {
        taskConfigs.put(taskId, new HashMap<>());
    }
    return new ClusterConfigState(configOffset, null, connectorTaskCounts, connectorConfigs, targetStates, taskConfigs, Collections.emptySet());
}
Also used : TargetState(org.apache.kafka.connect.runtime.TargetState) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) HashMap(java.util.HashMap) Metadata(org.apache.kafka.clients.Metadata) LogContext(org.apache.kafka.common.utils.LogContext) KafkaConfigBackingStore(org.apache.kafka.connect.storage.KafkaConfigBackingStore) GroupRebalanceConfig(org.apache.kafka.clients.GroupRebalanceConfig) Metrics(org.apache.kafka.common.metrics.Metrics) ConsumerNetworkClient(org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient) HashMap(java.util.HashMap) Map(java.util.Map) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) Before(org.junit.Before)

Example 3 with GroupRebalanceConfig

Usage of org.apache.kafka.clients.GroupRebalanceConfig in the Apache Kafka project.

From the class KafkaConsumerTest, method newConsumer.

/**
 * Assembles a KafkaConsumer wired to the supplied mock client and metadata,
 * with a ConsumerCoordinator only when a group id is given.
 */
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, SubscriptionState subscription, ConsumerMetadata metadata, ConsumerPartitionAssignor assignor, boolean autoCommitEnabled, String groupId, Optional<String> groupInstanceId, Optional<Deserializer<String>> valueDeserializer, boolean throwOnStableOffsetNotSupported) {
    // Fixed identity and retry settings for the consumer under test.
    String clientId = "mock-consumer";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    int rebalanceTimeoutMs = 60000;

    // Fetch tuning: anything from one byte up to Integer.MAX_VALUE per fetch,
    // waiting at most 500 ms, with CRC checking on.
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;

    // Keys always deserialize as strings; the value deserializer is overridable.
    Deserializer<String> keyDeser = new StringDeserializer();
    Deserializer<String> valueDeser = valueDeserializer.orElse(new StringDeserializer());

    List<ConsumerPartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> consumerInterceptors = new ConsumerInterceptors<>(Collections.emptyList());
    Metrics consumerMetricsGroup = new Metrics(time);
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
    LogContext logContext = new LogContext();
    ConsumerNetworkClient networkClient = new ConsumerNetworkClient(logContext, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);

    // Group-less consumers (groupId == null) run without a coordinator.
    ConsumerCoordinator coordinator = null;
    if (groupId != null) {
        GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, groupInstanceId, retryBackoffMs, true);
        coordinator = new ConsumerCoordinator(rebalanceConfig, logContext, networkClient, assignors, metadata, subscription, consumerMetricsGroup, metricGroupPrefix, time, autoCommitEnabled, autoCommitIntervalMs, consumerInterceptors, throwOnStableOffsetNotSupported);
    }

    Fetcher<String, String> fetcher = new Fetcher<>(logContext, networkClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords, checkCrcs, "", keyDeser, valueDeser, metadata, subscription, consumerMetricsGroup, metricsRegistry.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, new ApiVersions());
    return new KafkaConsumer<>(logContext, clientId, coordinator, keyDeser, valueDeser, fetcher, consumerInterceptors, time, networkClient, consumerMetricsGroup, subscription, metadata, retryBackoffMs, requestTimeoutMs, defaultApiTimeoutMs, assignors, groupId);
}
Also used : ConsumerInterceptors(org.apache.kafka.clients.consumer.internals.ConsumerInterceptors) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ConsumerCoordinator(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator) LogContext(org.apache.kafka.common.utils.LogContext) GroupRebalanceConfig(org.apache.kafka.clients.GroupRebalanceConfig) Metrics(org.apache.kafka.common.metrics.Metrics) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics) ConsumerNetworkClient(org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) Fetcher(org.apache.kafka.clients.consumer.internals.Fetcher) ConsumerMetrics(org.apache.kafka.clients.consumer.internals.ConsumerMetrics)

Example 4 with GroupRebalanceConfig

Usage of org.apache.kafka.clients.GroupRebalanceConfig in the Apache Kafka project.

From the class HeartbeatTest, method setUp.

@BeforeEach
public void setUp() {
    // Build the Heartbeat under test directly from a dynamic-member rebalance
    // config (no group.instance.id, leaveGroupOnClose = true).
    heartbeat = new Heartbeat(
        new GroupRebalanceConfig(sessionTimeoutMs, maxPollIntervalMs, heartbeatIntervalMs, "group_id", Optional.empty(), retryBackoffMs, true),
        time);
}
Also used : GroupRebalanceConfig(org.apache.kafka.clients.GroupRebalanceConfig) BeforeEach(org.junit.jupiter.api.BeforeEach)

Example 5 with GroupRebalanceConfig

Usage of org.apache.kafka.clients.GroupRebalanceConfig in the Apache Kafka project.

From the class WorkerCoordinatorIncrementalTest, method setup.

@Before
public void setup() {
    // Fixture for incremental-cooperative worker rebalancing: a mock cluster,
    // three named workers, and a WorkerCoordinator driven by mock time.
    LogContext loggerFactory = new LogContext();
    this.time = new MockTime();
    this.metadata = new Metadata(0, Long.MAX_VALUE, loggerFactory, new ClusterResourceListeners());
    this.client = new MockClient(time, metadata);
    // Single-broker cluster with one topic ("topic", 1 partition).
    this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)));
    this.node = metadata.fetch().nodes().get(0);
    this.consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();
    // Three workers: worker1 leads, worker2/worker3 are ordinary members.
    this.leaderId = "worker1";
    this.memberId = "worker2";
    this.anotherMemberId = "worker3";
    this.leaderUrl = expectedUrl(leaderId);
    this.memberUrl = expectedUrl(memberId);
    this.anotherMemberUrl = expectedUrl(anotherMemberId);
    this.generationId = 3;
    this.offset = 10L;
    this.configStorageCalls = 0;
    // Dynamic member (no group.instance.id), last arg presumably
    // leaveGroupOnClose = true — TODO confirm parameter name.
    this.rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, Optional.empty(), retryBackoffMs, true);
    this.coordinator = new WorkerCoordinator(rebalanceConfig, loggerFactory, consumerClient, metrics, "worker" + groupId, time, expectedUrl(leaderId), configStorage, rebalanceListener, compatibility, rebalanceDelay);
    // clusterConfigState helper presumably builds (offset, #connectors, #tasks) —
    // confirm signature against the test class.
    configState1 = clusterConfigState(offset, 2, 4);
}
Also used : GroupRebalanceConfig(org.apache.kafka.clients.GroupRebalanceConfig) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Metrics(org.apache.kafka.common.metrics.Metrics) ConsumerNetworkClient(org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient) Metadata(org.apache.kafka.clients.Metadata) LogContext(org.apache.kafka.common.utils.LogContext) MockTime(org.apache.kafka.common.utils.MockTime) MockClient(org.apache.kafka.clients.MockClient) Before(org.junit.Before)

Aggregations

GroupRebalanceConfig (org.apache.kafka.clients.GroupRebalanceConfig)5 Metrics (org.apache.kafka.common.metrics.Metrics)4 LogContext (org.apache.kafka.common.utils.LogContext)4 MockClient (org.apache.kafka.clients.MockClient)3 ConsumerNetworkClient (org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient)3 ClusterResourceListeners (org.apache.kafka.common.internals.ClusterResourceListeners)3 MockTime (org.apache.kafka.common.utils.MockTime)3 Metadata (org.apache.kafka.clients.Metadata)2 Before (org.junit.Before)2 HashMap (java.util.HashMap)1 Map (java.util.Map)1 ApiVersions (org.apache.kafka.clients.ApiVersions)1 NodeApiVersions (org.apache.kafka.clients.NodeApiVersions)1 ConsumerCoordinator (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator)1 ConsumerInterceptors (org.apache.kafka.clients.consumer.internals.ConsumerInterceptors)1 ConsumerMetrics (org.apache.kafka.clients.consumer.internals.ConsumerMetrics)1 Fetcher (org.apache.kafka.clients.consumer.internals.Fetcher)1 Node (org.apache.kafka.common.Node)1 StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer)1 TargetState (org.apache.kafka.connect.runtime.TargetState)1