Use of org.apache.kafka.clients.consumer.Consumer in project samza by apache.
The class TestKafkaSystemConsumer, method testStartConsumer:
@Test
public void testStartConsumer() {
    final Consumer consumer = Mockito.mock(Consumer.class);
    final KafkaConsumerProxyFactory kafkaConsumerProxyFactory = Mockito.mock(KafkaConsumerProxyFactory.class);
    final KafkaSystemConsumerMetrics kafkaSystemConsumerMetrics = new KafkaSystemConsumerMetrics(TEST_SYSTEM, new NoOpMetricsRegistry());
    final SystemStreamPartition testSystemStreamPartition1 = new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(0));
    final SystemStreamPartition testSystemStreamPartition2 = new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(1));
    final String testOffset = "1";
    final KafkaConsumerProxy kafkaConsumerProxy = Mockito.mock(KafkaConsumerProxy.class);

    Mockito.when(kafkaConsumerProxyFactory.create(Mockito.anyObject())).thenReturn(kafkaConsumerProxy);
    Mockito.doNothing().when(consumer).seek(new TopicPartition(TEST_STREAM, 0), 1);
    Mockito.doNothing().when(consumer).seek(new TopicPartition(TEST_STREAM, 1), 1);

    KafkaSystemConsumer kafkaSystemConsumer = new KafkaSystemConsumer(consumer, TEST_SYSTEM, new MapConfig(), TEST_CLIENT_ID,
        kafkaConsumerProxyFactory, kafkaSystemConsumerMetrics, new SystemClock());
    kafkaSystemConsumer.register(testSystemStreamPartition1, testOffset);
    kafkaSystemConsumer.register(testSystemStreamPartition2, testOffset);
    kafkaSystemConsumer.startConsumer();

    Mockito.verify(consumer).seek(new TopicPartition(TEST_STREAM, 0), 1);
    Mockito.verify(consumer).seek(new TopicPartition(TEST_STREAM, 1), 1);
    Mockito.verify(kafkaConsumerProxy).start();
    Mockito.verify(kafkaConsumerProxy).addTopicPartition(testSystemStreamPartition1, Long.valueOf(testOffset));
    Mockito.verify(kafkaConsumerProxy).addTopicPartition(testSystemStreamPartition2, Long.valueOf(testOffset));
}
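The seek stubbing and verification above can be reduced to a standalone Mockito sketch against the plain Consumer interface; the class name, topic, and offset below are illustrative and not taken from the Samza test.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.mockito.Mockito;

public class SeekMockSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        // Consumer is an interface, so Mockito can mock it directly; seek() is void,
        // so it needs no stubbing before verification.
        final Consumer<byte[], byte[]> consumer = Mockito.mock(Consumer.class);
        final TopicPartition partition = new TopicPartition("test-stream", 0);

        // The code under test would normally perform this call; here it is done inline.
        consumer.seek(partition, 1L);

        // Verify that the consumer was positioned at the expected offset on the expected partition.
        Mockito.verify(consumer).seek(partition, 1L);
    }
}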
Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
The class MirrorConnectorsIntegrationBaseTest, method waitForConsumerGroupOffsetSync:
/*
 * Given a consumer group, topics, and an expected number of records, make sure the
 * consumer group offsets are eventually synced to the expected offset totals.
 */
protected static <T> void waitForConsumerGroupOffsetSync(EmbeddedConnectCluster connect, Consumer<T, T> consumer,
        List<String> topics, String consumerGroupId, int numRecords) throws InterruptedException {
    try (Admin adminClient = connect.kafka().createAdminClient()) {
        List<TopicPartition> tps = new ArrayList<>(NUM_PARTITIONS * topics.size());
        for (int partitionIndex = 0; partitionIndex < NUM_PARTITIONS; partitionIndex++) {
            for (String topic : topics) {
                tps.add(new TopicPartition(topic, partitionIndex));
            }
        }
        long expectedTotalOffsets = numRecords * topics.size();
        waitForCondition(() -> {
            Map<TopicPartition, OffsetAndMetadata> consumerGroupOffsets =
                adminClient.listConsumerGroupOffsets(consumerGroupId).partitionsToOffsetAndMetadata().get();
            long consumerGroupOffsetTotal = consumerGroupOffsets.values().stream().mapToLong(OffsetAndMetadata::offset).sum();
            Map<TopicPartition, Long> offsets = consumer.endOffsets(tps, CONSUMER_POLL_TIMEOUT_MS);
            long totalOffsets = offsets.values().stream().mapToLong(l -> l).sum();
            // make sure the consumer group offsets are synced to the expected number
            return totalOffsets == expectedTotalOffsets && consumerGroupOffsetTotal > 0;
        }, OFFSET_SYNC_DURATION_MS, "Consumer group offset sync is not complete in time");
    }
}
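The check inside the waitForCondition lambda can also be read on its own. A minimal sketch of the same comparison as a standalone method; the method name groupOffsetsSynced, the Admin client, consumer, partitions, and timeout are assumed inputs, not part of the original test:

import java.time.Duration;
import java.util.Collection;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

static boolean groupOffsetsSynced(Admin admin, Consumer<?, ?> consumer, Collection<TopicPartition> partitions,
        String groupId, long expectedTotalOffsets) throws Exception {
    // Total offsets committed by the consumer group across all requested partitions.
    Map<TopicPartition, OffsetAndMetadata> groupOffsets =
        admin.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();
    long committedTotal = groupOffsets.values().stream().mapToLong(OffsetAndMetadata::offset).sum();

    // Total log end offsets, i.e. how many records the topics actually hold.
    long endOffsetTotal = consumer.endOffsets(partitions, Duration.ofSeconds(5)).values().stream()
        .mapToLong(Long::longValue).sum();

    // Synced once the topics hold the expected number of records and the group has committed something.
    return endOffsetTotal == expectedTotalOffsets && committedTotal > 0;
}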
Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
The class StreamsPartitionAssignorTest, method shouldRequestCommittedOffsetsForPreexistingSourceChangelogs:
@Test
public void shouldRequestCommittedOffsetsForPreexistingSourceChangelogs() {
    final Set<TopicPartition> changelogs = mkSet(
        new TopicPartition("topic1", 0),
        new TopicPartition("topic1", 1),
        new TopicPartition("topic1", 2));

    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.table("topic1", Materialized.as("store"));

    final Properties props = new Properties();
    props.putAll(configProps());
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build(props));
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(props));
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));

    createDefaultMockTaskManager();
    configurePartitionAssignorWith(singletonMap(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE));
    overwriteInternalTopicManagerWithMock(false);

    final Consumer<byte[], byte[]> consumerClient = referenceContainer.mainConsumer;
    EasyMock.expect(consumerClient.committed(EasyMock.eq(changelogs)))
        .andReturn(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> new OffsetAndMetadata(Long.MAX_VALUE))))
        .once();
    EasyMock.replay(consumerClient);

    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));

    EasyMock.verify(consumerClient);
}
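The consumer call being verified here is Consumer#committed(Set<TopicPartition>), which returns the committed offset per partition (with a null value for any partition that has no committed offset). A minimal fragment showing that call on its own, assuming an already constructed KafkaConsumer named consumer:

final Set<TopicPartition> changelogs = new HashSet<>(Arrays.asList(
    new TopicPartition("topic1", 0),
    new TopicPartition("topic1", 1),
    new TopicPartition("topic1", 2)));

// committed() asks the group coordinator and returns one entry per requested partition.
final Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(changelogs);
committed.forEach((tp, meta) ->
    System.out.println(tp + " -> " + (meta == null ? "no committed offset" : meta.offset())));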
Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
The class HighAvailabilityTaskAssignorIntegrationTest, method shouldScaleOutWithWarmupTasks:
private void shouldScaleOutWithWarmupTasks(final Function<String, Materialized<Object, Object, KeyValueStore<Bytes, byte[]>>> materializedFunction) throws InterruptedException {
    final String testId = safeUniqueTestName(getClass(), testName);
    final String appId = "appId_" + System.currentTimeMillis() + "_" + testId;
    final String inputTopic = "input" + testId;
    final Set<TopicPartition> inputTopicPartitions = mkSet(new TopicPartition(inputTopic, 0), new TopicPartition(inputTopic, 1));
    final String storeName = "store" + testId;
    final String storeChangelog = appId + "-store" + testId + "-changelog";
    final Set<TopicPartition> changelogTopicPartitions = mkSet(new TopicPartition(storeChangelog, 0), new TopicPartition(storeChangelog, 1));

    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, 2, inputTopic, storeChangelog);

    final ReentrantLock assignmentLock = new ReentrantLock();
    final AtomicInteger assignmentsCompleted = new AtomicInteger(0);
    final Map<Integer, Boolean> assignmentsStable = new ConcurrentHashMap<>();
    final AtomicBoolean assignmentStable = new AtomicBoolean(false);
    final AssignmentListener assignmentListener = stable -> {
        assignmentLock.lock();
        try {
            final int thisAssignmentIndex = assignmentsCompleted.incrementAndGet();
            assignmentsStable.put(thisAssignmentIndex, stable);
            assignmentStable.set(stable);
        } finally {
            assignmentLock.unlock();
        }
    };

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(inputTopic, materializedFunction.apply(storeName));
    final Topology topology = builder.build();

    final int numberOfRecords = 500;
    produceTestData(inputTopic, numberOfRecords);

    try (final KafkaStreams kafkaStreams0 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final Consumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties())) {
        kafkaStreams0.start();

        // sanity check: just make sure we actually wrote all the input records
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(inputTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the input topic: " + getEndOffsetSum(inputTopicPartitions, consumer));

        // wait until all the input records are in the changelog
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(changelogTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the changelog: " + getEndOffsetSum(changelogTopicPartitions, consumer));

        final AtomicLong instance1TotalRestored = new AtomicLong(-1);
        final AtomicLong instance1NumRestored = new AtomicLong(-1);
        final CountDownLatch restoreCompleteLatch = new CountDownLatch(1);
        kafkaStreams1.setGlobalStateRestoreListener(new StateRestoreListener() {
            @Override
            public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
            }

            @Override
            public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
                instance1NumRestored.accumulateAndGet(numRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
            }

            @Override
            public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
                instance1TotalRestored.accumulateAndGet(totalRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
                restoreCompleteLatch.countDown();
            }
        });

        final int assignmentsBeforeScaleOut = assignmentsCompleted.get();
        kafkaStreams1.start();

        TestUtils.waitForCondition(() -> {
            assignmentLock.lock();
            try {
                if (assignmentsCompleted.get() > assignmentsBeforeScaleOut) {
                    assertFalseNoRetry(
                        assignmentsStable.get(assignmentsBeforeScaleOut + 1),
                        "the first assignment after adding a node should be unstable while we warm up the state.");
                    return true;
                } else {
                    return false;
                }
            } finally {
                assignmentLock.unlock();
            }
        }, 120_000L, "Never saw a first assignment after scale out: " + assignmentsCompleted.get());

        TestUtils.waitForCondition(
            assignmentStable::get,
            120_000L,
            "Assignment hasn't become stable: " + assignmentsCompleted.get()
                + " Note, if this does fail, check and see if the new instance just failed to catch up within"
                + " the probing rebalance interval. A full minute should be long enough to read ~500 records"
                + " in any test environment, but you never know...");

        restoreCompleteLatch.await();
        // We should finalize the restoration without having restored any records (because they're already in
        // the store). Otherwise, we failed to properly re-use the state from the standby.
        assertThat(instance1TotalRestored.get(), is(0L));
        // Belt-and-suspenders check that we never even attempt to restore any records.
        assertThat(instance1NumRestored.get(), is(-1L));
    }
}
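The helper getEndOffsetSum is referenced above but not included in this excerpt. A plausible implementation based on Consumer#endOffsets (an assumption about what the helper looks like, not a copy of the project's code):

private static long getEndOffsetSum(final Set<TopicPartition> partitions, final Consumer<String, String> consumer) {
    long sum = 0;
    // endOffsets() returns, per partition, the offset of the next record that would be written.
    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
    for (final long endOffset : endOffsets.values()) {
        sum += endOffset;
    }
    return sum;
}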
Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
The class StreamThreadTest, method shouldConstructAdminMetrics:
@Test
public void shouldConstructAdminMetrics() {
    final Node broker1 = new Node(0, "dummyHost-1", 1234);
    final Node broker2 = new Node(1, "dummyHost-2", 1234);
    final List<Node> cluster = Arrays.asList(broker1, broker2);
    final MockAdminClient adminClient = new MockAdminClient.Builder().brokers(cluster).clusterId(null).build();

    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, consumerGroupMetadata);

    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();

    final StreamThread thread = new StreamThread(mockTime, config, adminClient, consumer, consumer, null, null,
        taskManager, streamsMetrics, topologyMetadata, CLIENT_ID, new LogContext(""), new AtomicInteger(),
        new AtomicLong(Long.MAX_VALUE), new LinkedList<>(), null, HANDLER, null);

    final MetricName testMetricName = new MetricName("test_metric", "", "", new HashMap<>());
    final Metric testMetric = new KafkaMetric(new Object(), testMetricName, (Measurable) (config, now) -> 0, null, new MockTime());
    EasyMock.replay(taskManager);
    adminClient.setMockMetrics(testMetricName, testMetric);

    final Map<MetricName, Metric> adminClientMetrics = thread.adminClientMetrics();
    assertEquals(testMetricName, adminClientMetrics.get(testMetricName).metricName());
}
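The same kind of lookup works on the consumer side: Consumer#metrics() exposes the client's metrics under the same MetricName and Metric types. A small illustrative fragment (not part of the test, and only meaningful against a real KafkaConsumer rather than the EasyMock mock used above):

final Map<MetricName, ? extends Metric> consumerMetrics = consumer.metrics();
consumerMetrics.forEach((name, metric) ->
    System.out.println(name.group() + "/" + name.name() + " = " + metric.metricValue()));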