Use of org.apache.kafka.clients.GroupRebalanceConfig in project kafka by apache.
From the class AbstractCoordinatorTest, method setupCoordinator:
private void setupCoordinator(int retryBackoffMs, int rebalanceTimeoutMs, Optional<String> groupInstanceId) {
    LogContext logContext = new LogContext();
    this.mockTime = new MockTime();
    ConsumerMetadata metadata = new ConsumerMetadata(retryBackoffMs, 60 * 60 * 1000L, false, false,
            new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext, new ClusterResourceListeners());
    this.mockClient = new MockClient(mockTime, metadata);
    this.consumerClient = new ConsumerNetworkClient(logContext, mockClient, metadata, mockTime,
            retryBackoffMs, REQUEST_TIMEOUT_MS, HEARTBEAT_INTERVAL_MS);
    metrics = new Metrics(mockTime);
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWith(1, emptyMap()));
    this.node = metadata.fetch().nodes().get(0);
    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(SESSION_TIMEOUT_MS, rebalanceTimeoutMs,
            HEARTBEAT_INTERVAL_MS, GROUP_ID, groupInstanceId, retryBackoffMs, !groupInstanceId.isPresent());
    this.coordinator = new DummyCoordinator(rebalanceConfig, consumerClient, metrics, mockTime);
}
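Note the last constructor argument: the test passes !groupInstanceId.isPresent(), so a static member (one that carries a group.instance.id) keeps its group membership when it closes, while a dynamic member leaves the group. A minimal sketch of the same call with hypothetical literal values, following the parameter order shown above:
// Hypothetical values for illustration only; the parameter order matches the call above.
Optional<String> staticInstanceId = Optional.of("consumer-instance-1");
GroupRebalanceConfig staticMemberConfig = new GroupRebalanceConfig(
        10000,                          // sessionTimeoutMs
        60000,                          // rebalanceTimeoutMs
        3000,                           // heartbeatIntervalMs
        "test-group",                   // groupId (made up)
        staticInstanceId,               // groupInstanceId
        100,                            // retryBackoffMs
        !staticInstanceId.isPresent()); // leave group on close: false for this static member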
Use of org.apache.kafka.clients.GroupRebalanceConfig in project kafka by apache.
From the class WorkerCoordinatorTest, method setup:
@Before
public void setup() {
    LogContext logContext = new LogContext();
    this.time = new MockTime();
    this.metadata = new Metadata(0, Long.MAX_VALUE, logContext, new ClusterResourceListeners());
    this.client = new MockClient(time, metadata);
    this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)));
    this.node = metadata.fetch().nodes().get(0);
    this.consumerClient = new ConsumerNetworkClient(logContext, client, metadata, time, 100, 1000, heartbeatIntervalMs);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();
    this.configStorage = PowerMock.createMock(KafkaConfigBackingStore.class);
    this.rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs,
            groupId, Optional.empty(), retryBackoffMs, true);
    this.coordinator = new WorkerCoordinator(rebalanceConfig, logContext, consumerClient, metrics,
            "consumer" + groupId, time, LEADER_URL, configStorage, rebalanceListener, compatibility, 0);
    configState1 = new ClusterConfigState(1L, null,
            Collections.singletonMap(connectorId1, 1),
            Collections.singletonMap(connectorId1, new HashMap<>()),
            Collections.singletonMap(connectorId1, TargetState.STARTED),
            Collections.singletonMap(taskId1x0, new HashMap<>()),
            Collections.emptySet());
    Map<String, Integer> configState2ConnectorTaskCounts = new HashMap<>();
    configState2ConnectorTaskCounts.put(connectorId1, 2);
    configState2ConnectorTaskCounts.put(connectorId2, 1);
    Map<String, Map<String, String>> configState2ConnectorConfigs = new HashMap<>();
    configState2ConnectorConfigs.put(connectorId1, new HashMap<>());
    configState2ConnectorConfigs.put(connectorId2, new HashMap<>());
    Map<String, TargetState> configState2TargetStates = new HashMap<>();
    configState2TargetStates.put(connectorId1, TargetState.STARTED);
    configState2TargetStates.put(connectorId2, TargetState.STARTED);
    Map<ConnectorTaskId, Map<String, String>> configState2TaskConfigs = new HashMap<>();
    configState2TaskConfigs.put(taskId1x0, new HashMap<>());
    configState2TaskConfigs.put(taskId1x1, new HashMap<>());
    configState2TaskConfigs.put(taskId2x0, new HashMap<>());
    configState2 = new ClusterConfigState(2L, null, configState2ConnectorTaskCounts, configState2ConnectorConfigs,
            configState2TargetStates, configState2TaskConfigs, Collections.emptySet());
    Map<String, Integer> configStateSingleTaskConnectorsConnectorTaskCounts = new HashMap<>();
    configStateSingleTaskConnectorsConnectorTaskCounts.put(connectorId1, 1);
    configStateSingleTaskConnectorsConnectorTaskCounts.put(connectorId2, 1);
    configStateSingleTaskConnectorsConnectorTaskCounts.put(connectorId3, 1);
    Map<String, Map<String, String>> configStateSingleTaskConnectorsConnectorConfigs = new HashMap<>();
    configStateSingleTaskConnectorsConnectorConfigs.put(connectorId1, new HashMap<>());
    configStateSingleTaskConnectorsConnectorConfigs.put(connectorId2, new HashMap<>());
    configStateSingleTaskConnectorsConnectorConfigs.put(connectorId3, new HashMap<>());
    Map<String, TargetState> configStateSingleTaskConnectorsTargetStates = new HashMap<>();
    configStateSingleTaskConnectorsTargetStates.put(connectorId1, TargetState.STARTED);
    configStateSingleTaskConnectorsTargetStates.put(connectorId2, TargetState.STARTED);
    configStateSingleTaskConnectorsTargetStates.put(connectorId3, TargetState.STARTED);
    Map<ConnectorTaskId, Map<String, String>> configStateSingleTaskConnectorsTaskConfigs = new HashMap<>();
    configStateSingleTaskConnectorsTaskConfigs.put(taskId1x0, new HashMap<>());
    configStateSingleTaskConnectorsTaskConfigs.put(taskId2x0, new HashMap<>());
    configStateSingleTaskConnectorsTaskConfigs.put(taskId3x0, new HashMap<>());
    configStateSingleTaskConnectors = new ClusterConfigState(2L, null,
            configStateSingleTaskConnectorsConnectorTaskCounts, configStateSingleTaskConnectorsConnectorConfigs,
            configStateSingleTaskConnectorsTargetStates, configStateSingleTaskConnectorsTaskConfigs,
            Collections.emptySet());
}
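The three ClusterConfigState fixtures above repeat the same per-connector map-building pattern. A hedged sketch of a hypothetical helper (not part of the original test) that collapses that boilerplate for the single-task case, assuming the same ClusterConfigState constructor used above:
// Hypothetical helper, for illustration only: builds a one-task-per-connector ClusterConfigState.
private static ClusterConfigState singleTaskState(long offset, Map<String, ConnectorTaskId> connectorsToTasks) {
    Map<String, Integer> taskCounts = new HashMap<>();
    Map<String, Map<String, String>> connectorConfigs = new HashMap<>();
    Map<String, TargetState> targetStates = new HashMap<>();
    Map<ConnectorTaskId, Map<String, String>> taskConfigs = new HashMap<>();
    connectorsToTasks.forEach((connector, task) -> {
        taskCounts.put(connector, 1);
        connectorConfigs.put(connector, new HashMap<>());
        targetStates.put(connector, TargetState.STARTED);
        taskConfigs.put(task, new HashMap<>());
    });
    return new ClusterConfigState(offset, null, taskCounts, connectorConfigs,
            targetStates, taskConfigs, Collections.emptySet());
}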
Use of org.apache.kafka.clients.GroupRebalanceConfig in project kafka by apache.
From the class KafkaConsumerTest, method newConsumer:
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, SubscriptionState subscription,
                                                  ConsumerMetadata metadata, ConsumerPartitionAssignor assignor,
                                                  boolean autoCommitEnabled, String groupId,
                                                  Optional<String> groupInstanceId,
                                                  Optional<Deserializer<String>> valueDeserializer,
                                                  boolean throwOnStableOffsetNotSupported) {
    String clientId = "mock-consumer";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    int rebalanceTimeoutMs = 60000;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> deserializer = valueDeserializer.orElse(new StringDeserializer());
    List<ConsumerPartitionAssignor> assignors = singletonList(assignor);
    ConsumerInterceptors<String, String> interceptors = new ConsumerInterceptors<>(Collections.emptyList());
    Metrics metrics = new Metrics(time);
    ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricGroupPrefix);
    LogContext loggerFactory = new LogContext();
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time,
            retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    ConsumerCoordinator consumerCoordinator = null;
    if (groupId != null) {
        GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs,
                heartbeatIntervalMs, groupId, groupInstanceId, retryBackoffMs, true);
        consumerCoordinator = new ConsumerCoordinator(rebalanceConfig, loggerFactory, consumerClient, assignors,
                metadata, subscription, metrics, metricGroupPrefix, time, autoCommitEnabled, autoCommitIntervalMs,
                interceptors, throwOnStableOffsetNotSupported);
    }
    Fetcher<String, String> fetcher = new Fetcher<>(loggerFactory, consumerClient, minBytes, maxBytes, maxWaitMs,
            fetchSize, maxPollRecords, checkCrcs, "", keyDeserializer, deserializer, metadata, subscription,
            metrics, metricsRegistry.fetcherMetrics, time, retryBackoffMs, requestTimeoutMs,
            IsolationLevel.READ_UNCOMMITTED, new ApiVersions());
    return new KafkaConsumer<>(loggerFactory, clientId, consumerCoordinator, keyDeserializer, deserializer, fetcher,
            interceptors, time, consumerClient, metrics, subscription, metadata, retryBackoffMs, requestTimeoutMs,
            defaultApiTimeoutMs, assignors, groupId);
}
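GroupRebalanceConfig (and the ConsumerCoordinator built from it) is only created when a groupId is supplied; with a null groupId the helper returns a group-less consumer with no coordinator. A hedged usage sketch of this helper with hypothetical argument values:
// Hypothetical calls for illustration; "test-group" and the Optional arguments are made up.
KafkaConsumer<String, String> groupedConsumer = newConsumer(time, client, subscription, metadata, assignor,
        true, "test-group", Optional.empty(), Optional.empty(), false);
// With groupId == null the "if (groupId != null)" block is skipped, consumerCoordinator stays null,
// and no GroupRebalanceConfig is constructed at all.
KafkaConsumer<String, String> standaloneConsumer = newConsumer(time, client, subscription, metadata, assignor,
        false, null, Optional.empty(), Optional.empty(), false);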
Use of org.apache.kafka.clients.GroupRebalanceConfig in project kafka by apache.
From the class HeartbeatTest, method setUp:
@BeforeEach
public void setUp() {
    GroupRebalanceConfig rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, maxPollIntervalMs,
            heartbeatIntervalMs, "group_id", Optional.empty(), retryBackoffMs, true);
    heartbeat = new Heartbeat(rebalanceConfig, time);
}
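Here maxPollIntervalMs is passed in the rebalance-timeout position, so the Heartbeat's timers are driven by sessionTimeoutMs, maxPollIntervalMs, and heartbeatIntervalMs from the config. A hedged sketch of how a test might exercise it against a mock clock; the sentHeartbeat/shouldHeartbeat method names are an assumption and not confirmed by the snippet above:
// Sketch only: sentHeartbeat/shouldHeartbeat are assumed method names on Heartbeat.
heartbeat.sentHeartbeat(time.milliseconds());               // record that a heartbeat was just sent
time.sleep(heartbeatIntervalMs + 1);                        // advance the MockTime past the interval
assertTrue(heartbeat.shouldHeartbeat(time.milliseconds())); // another heartbeat should now be due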
Use of org.apache.kafka.clients.GroupRebalanceConfig in project kafka by apache.
From the class WorkerCoordinatorIncrementalTest, method setup:
@Before
public void setup() {
    LogContext loggerFactory = new LogContext();
    this.time = new MockTime();
    this.metadata = new Metadata(0, Long.MAX_VALUE, loggerFactory, new ClusterResourceListeners());
    this.client = new MockClient(time, metadata);
    this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)));
    this.node = metadata.fetch().nodes().get(0);
    this.consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time,
            retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();
    this.leaderId = "worker1";
    this.memberId = "worker2";
    this.anotherMemberId = "worker3";
    this.leaderUrl = expectedUrl(leaderId);
    this.memberUrl = expectedUrl(memberId);
    this.anotherMemberUrl = expectedUrl(anotherMemberId);
    this.generationId = 3;
    this.offset = 10L;
    this.configStorageCalls = 0;
    this.rebalanceConfig = new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs,
            groupId, Optional.empty(), retryBackoffMs, true);
    this.coordinator = new WorkerCoordinator(rebalanceConfig, loggerFactory, consumerClient, metrics,
            "worker" + groupId, time, expectedUrl(leaderId), configStorage, rebalanceListener, compatibility,
            rebalanceDelay);
    configState1 = clusterConfigState(offset, 2, 4);
}
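This setup relies on an expectedUrl helper and a clusterConfigState fixture builder that are not shown in the snippet. A hedged sketch of what a URL helper of that shape could look like; the format and port are purely assumptions, not the project's actual values:
// Hypothetical helper for illustration; the real test's URL format may differ.
private static String expectedUrl(String member) {
    return "http://" + member + ":8083"; // 8083 is assumed here as a Connect-style REST port
}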