Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
Class StreamThreadTest, method shouldTransmitTaskManagerMetrics:
@Test
public void shouldTransmitTaskManagerMetrics() {
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, consumerGroupMetadata);

    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);

    final MetricName testMetricName = new MetricName("test_metric", "", "", new HashMap<>());
    final Metric testMetric = new KafkaMetric(
        new Object(),
        testMetricName,
        (Measurable) (config, now) -> 0,
        null,
        new MockTime());
    final Map<MetricName, Metric> dummyProducerMetrics = singletonMap(testMetricName, testMetric);

    expect(taskManager.producerMetrics()).andReturn(dummyProducerMetrics);
    EasyMock.replay(taskManager);

    final StreamsMetricsImpl streamsMetrics =
        new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();

    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);

    assertThat(dummyProducerMetrics, is(thread.producerMetrics()));
}
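The final assertion only holds if the StreamThread forwards the TaskManager's producer metrics unchanged. A minimal sketch of that delegation, assuming a taskManager field on StreamThread (the field name is illustrative, not taken from the upstream class):

// Sketch only: the real StreamThread implementation may differ in detail.
public Map<MetricName, Metric> producerMetrics() {
    // Delegate to the task manager, which aggregates metrics from the
    // producers of its active tasks.
    return taskManager.producerMetrics();
}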
Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
Class TaskManagerTest, method shouldReviveCorruptTasks:
@Test
public void shouldReviveCorruptTasks() {
    final ProcessorStateManager stateManager = EasyMock.createStrictMock(ProcessorStateManager.class);
    stateManager.markChangelogAsCorrupted(taskId00Partitions);
    EasyMock.expectLastCall().once();
    replay(stateManager);

    final AtomicBoolean enforcedCheckpoint = new AtomicBoolean(false);
    final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true, stateManager) {
        @Override
        public void postCommit(final boolean enforceCheckpoint) {
            if (enforceCheckpoint) {
                enforcedCheckpoint.set(true);
            }
            super.postCommit(enforceCheckpoint);
        }
    };

    // `handleAssignment`
    expectRestoreToBeCompleted(consumer, changeLogReader);
    expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment))).andStubReturn(singletonList(task00));
    topologyBuilder.addSubscribedTopicsFromAssignment(anyObject(), anyString());
    expectLastCall().anyTimes();
    expect(consumer.assignment()).andReturn(taskId00Partitions);
    replay(activeTaskCreator, topologyBuilder, consumer, changeLogReader);

    taskManager.handleAssignment(taskId00Assignment, emptyMap());
    assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), tp -> assertThat(tp, is(empty()))), is(true));
    assertThat(task00.state(), is(Task.State.RUNNING));

    task00.setChangelogOffsets(singletonMap(t1p0, 0L));
    taskManager.handleCorruption(singleton(taskId00));

    assertThat(task00.commitPrepared, is(true));
    assertThat(task00.state(), is(Task.State.CREATED));
    assertThat(task00.partitionsForOffsetReset, equalTo(taskId00Partitions));
    assertThat(enforcedCheckpoint.get(), is(true));
    assertThat(taskManager.activeTaskMap(), is(singletonMap(taskId00, task00)));
    assertThat(taskManager.standbyTaskMap(), Matchers.anEmptyMap());

    verify(stateManager);
    verify(consumer);
}
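The test leans on an expectRestoreToBeCompleted helper defined elsewhere in TaskManagerTest. A plausible sketch of what such a stub sets up is shown below; the exact expectations and the "assignment" partition name are assumptions, not the upstream code:

// Hypothetical sketch of the helper used above; the real TaskManagerTest
// version may stub different collaborators.
private static void expectRestoreToBeCompleted(final Consumer<byte[], byte[]> consumer,
                                               final ChangelogReader changeLogReader) {
    final Set<TopicPartition> assignment = singleton(new TopicPartition("assignment", 0));
    expect(consumer.assignment()).andReturn(assignment);
    consumer.resume(assignment);  // partitions paused during restoration are resumed once it completes
    expectLastCall();
    expect(changeLogReader.completedChangelogs()).andReturn(emptySet());
}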
Use of org.apache.kafka.clients.consumer.Consumer in project beam by apache.
Class KafkaIOTest, method mkMockConsumer:
// Update mock consumer with records distributed among the given topics, each with given number
// of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(
    List<String> topics,
    int partitionsPerTopic,
    int numElements,
    OffsetResetStrategy offsetResetStrategy,
    Map<String, Object> config,
    SerializableFunction<Integer, byte[]> keyFunction,
    SerializableFunction<Integer, byte[]> valueFunction) {

    final List<TopicPartition> partitions = new ArrayList<>();
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
    Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();

    for (String topic : topics) {
        List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
        for (int i = 0; i < partitionsPerTopic; i++) {
            TopicPartition tp = new TopicPartition(topic, i);
            partitions.add(tp);
            partIds.add(new PartitionInfo(topic, i, null, null, null));
            records.put(tp, new ArrayList<>());
        }
        partitionMap.put(topic, partIds);
    }

    int numPartitions = partitions.size();
    final long[] offsets = new long[numPartitions];

    long timestampStartMillis =
        (Long) config.getOrDefault(TIMESTAMP_START_MILLIS_CONFIG, LOG_APPEND_START_TIME.getMillis());
    TimestampType timestampType =
        TimestampType.forName(
            (String) config.getOrDefault(TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.toString()));

    for (int i = 0; i < numElements; i++) {
        int pIdx = i % numPartitions;
        TopicPartition tp = partitions.get(pIdx);
        byte[] key = keyFunction.apply(i);
        byte[] value = valueFunction.apply(i);
        records.get(tp).add(
            new ConsumerRecord<>(
                tp.topic(),
                tp.partition(),
                offsets[pIdx]++,
                timestampStartMillis + Duration.standardSeconds(i).getMillis(),
                timestampType,
                0,
                key.length,
                value.length,
                key,
                value));
    }

    // This is updated when reader assigns partitions.
    final AtomicReference<List<TopicPartition>> assignedPartitions =
        new AtomicReference<>(Collections.<TopicPartition>emptyList());

    final MockConsumer<byte[], byte[]> consumer =
        new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
            @Override
            public synchronized void assign(final Collection<TopicPartition> assigned) {
                super.assign(assigned);
                assignedPartitions.set(ImmutableList.copyOf(assigned));
                for (TopicPartition tp : assigned) {
                    updateBeginningOffsets(ImmutableMap.of(tp, 0L));
                    updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
                }
            }

            // Override offsetsForTimes() in order to look up the offsets by timestamp.
            @Override
            public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(
                Map<TopicPartition, Long> timestampsToSearch) {
                return timestampsToSearch.entrySet().stream()
                    .map(e -> {
                        // In test scope, timestamp == offset.
                        long maxOffset = offsets[partitions.indexOf(e.getKey())];
                        long offset = e.getValue();
                        OffsetAndTimestamp value =
                            (offset >= maxOffset) ? null : new OffsetAndTimestamp(offset, offset);
                        return new SimpleEntry<>(e.getKey(), value);
                    })
                    .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
            }
        };

    for (String topic : topics) {
        consumer.updatePartitions(topic, partitionMap.get(topic));
    }

    // MockConsumer does not maintain any relationship between partition seek position and the
    // records added. e.g. if we add 10 records to a partition and then seek to end of the
    // partition, MockConsumer is still going to return the 10 records in next poll. It is
    // our responsibility to make sure currently enqueued records sync with partition offsets.
    // The following task will be called inside each invocation to MockConsumer.poll().
    // We enqueue only the records with the offset >= partition's current position.
    Runnable recordEnqueueTask = new Runnable() {
        @Override
        public void run() {
            // Add all the records with offset >= current partition position.
            int recordsAdded = 0;
            for (TopicPartition tp : assignedPartitions.get()) {
                long curPos = consumer.position(tp);
                for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
                    if (r.offset() >= curPos) {
                        consumer.addRecord(r);
                        recordsAdded++;
                    }
                }
            }
            if (recordsAdded == 0) {
                if (config.get("inject.error.at.eof") != null) {
                    consumer.setException(new KafkaException("Injected error in consumer.poll()"));
                }
                // MockConsumer.poll(timeout) does not actually wait even when there aren't any
                // records. Add a small wait here in order to avoid busy looping in the reader.
                Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
                // TODO: BEAM-4086: testUnboundedSourceWithoutBoundedWrapper() occasionally hangs
                // without this wait. Need to look into it.
            }
            consumer.schedulePollTask(this);
        }
    };
    consumer.schedulePollTask(recordEnqueueTask);
    return consumer;
}
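For orientation, a hypothetical call site is shown below; the topic names, counts, and key/value encoders are illustrative and not taken from KafkaIOTest:

// Illustrative only: 1000 records spread round-robin over 2 topics x 10 partitions,
// with keys and values encoded as big-endian longs.
MockConsumer<byte[], byte[]> mockConsumer =
    mkMockConsumer(
        ImmutableList.of("topic_a", "topic_b"),
        10,
        1000,
        OffsetResetStrategy.EARLIEST,
        ImmutableMap.of(),
        i -> ByteBuffer.allocate(8).putLong(i).array(),
        i -> ByteBuffer.allocate(8).putLong(i).array());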
Use of org.apache.kafka.clients.consumer.Consumer in project beam by apache.
Class KafkaCommitOffsetTest, method testCommitOffsetDoFn:
@Test
public void testCommitOffsetDoFn() {
    Map<String, Object> configMap = new HashMap<>();
    configMap.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");

    ReadSourceDescriptors<Object, Object> descriptors =
        ReadSourceDescriptors.read()
            .withBootstrapServers("bootstrap_server")
            .withConsumerConfigUpdates(configMap)
            .withConsumerFactoryFn(
                new SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>>() {
                    @Override
                    public Consumer<byte[], byte[]> apply(Map<String, Object> input) {
                        Assert.assertEquals("group1", input.get(ConsumerConfig.GROUP_ID_CONFIG));
                        return consumer;
                    }
                });

    CommitOffsetDoFn doFn = new CommitOffsetDoFn(descriptors);
    doFn.processElement(KV.of(KafkaSourceDescriptor.of(partition, null, null, null, null, null), 1L));

    Assert.assertEquals(2L, consumer.commit.get(partition).offset());
}
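The element carries offset 1 but the test expects a committed offset of 2, because Kafka's convention is to commit the offset of the next record to consume, not the last record processed. A minimal sketch of that convention against a plain consumer, independent of the Beam DoFn under test:

// Kafka convention: commit lastProcessedOffset + 1, i.e. the next offset to read.
long lastProcessedOffset = 1L;
consumer.commitSync(
    Collections.singletonMap(
        new TopicPartition("topic", 0),
        new OffsetAndMetadata(lastProcessedOffset + 1)));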
Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
Class AbstractTaskTest, method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException:
@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException() throws Exception {
    final Consumer consumer = mockConsumer(new AuthorizationException("blah"));
    final AbstractTask task = createTask(consumer);
    task.initializeOffsetLimits();
}
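mockConsumer is a helper defined elsewhere in AbstractTaskTest. A plausible sketch is a MockConsumer whose committed-offset lookup fails with the supplied exception; the exact override and method shape are assumptions, not the upstream helper:

// Hypothetical sketch of the helper; the real AbstractTaskTest version may differ.
private Consumer<byte[], byte[]> mockConsumer(final RuntimeException toThrow) {
    return new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            // initializeOffsetLimits() looks up committed offsets, so throwing here
            // exercises the AuthorizationException -> ProcessorStateException mapping.
            throw toThrow;
        }
    };
}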