Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
The class StreamThreadTest, method shouldChangeStateInRebalanceListener.
@Test
public void shouldChangeStateInRebalanceListener() {
final StreamThread thread = createStreamThread(CLIENT_ID, config, false);
final StateListenerStub stateListener = new StateListenerStub();
thread.setStateListener(stateListener);
assertEquals(thread.state(), StreamThread.State.CREATED);
final ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener();
final List<TopicPartition> revokedPartitions;
final List<TopicPartition> assignedPartitions;
// revoke nothing
thread.setState(StreamThread.State.STARTING);
revokedPartitions = Collections.emptyList();
rebalanceListener.onPartitionsRevoked(revokedPartitions);
assertEquals(thread.state(), StreamThread.State.PARTITIONS_REVOKED);
// assign single partition
assignedPartitions = Collections.singletonList(t1p1);
final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
mockConsumer.assign(assignedPartitions);
mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
rebalanceListener.onPartitionsAssigned(assignedPartitions);
thread.runOnce();
assertEquals(thread.state(), StreamThread.State.RUNNING);
Assert.assertEquals(4, stateListener.numChanges);
Assert.assertEquals(StreamThread.State.PARTITIONS_ASSIGNED, stateListener.oldState);
thread.shutdown();
assertSame(StreamThread.State.PENDING_SHUTDOWN, thread.state());
}
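This test leans on one MockConsumer detail: every assigned partition needs a beginning offset registered before the thread polls, otherwise the EARLIEST reset cannot resolve a fetch position. A minimal sketch of that setup outside the Streams test harness (class name, topic and partition are illustrative, not taken from the test above):
import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerAssignSketch {
    public static void main(final String[] args) {
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        final TopicPartition t1p1 = new TopicPartition("topic1", 1);
        // Assign the partition and register its beginning offset; without the offset,
        // MockConsumer cannot resolve a fetch position and poll() fails.
        consumer.assign(Collections.singletonList(t1p1));
        consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ZERO);
        System.out.println("polled " + records.count() + " records");
    }
}
With no records enqueued the poll simply returns empty, which is all the state-transition assertions above require.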
Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
The class StreamTaskTest, method createDisconnectedTask.
private StreamTask createDisconnectedTask(final StreamsConfig config) {
final MockKeyValueStore stateStore = new MockKeyValueStore(storeName, false);
final ProcessorTopology topology = ProcessorTopologyFactories.with(asList(source1, source2), mkMap(mkEntry(topic1, source1), mkEntry(topic2, source2)), singletonList(stateStore), emptyMap());
final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
@Override
public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
throw new TimeoutException("KABOOM!");
}
};
final InternalProcessorContext context = new ProcessorContextImpl(taskId, config, stateManager, streamsMetrics, null);
return new StreamTask(taskId, partitions, topology, consumer, new TopologyConfig(null, config, new Properties()).getTaskConfig(), streamsMetrics, stateDirectory, cache, time, stateManager, recordCollector, context, logContext);
}
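The MockConsumer-specific piece here is the anonymous subclass whose committed() always times out; the rest is Streams task plumbing. A standalone sketch of the same idea, assuming only the failure path matters (class name and topic literal are illustrative):
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;

public class DisconnectedConsumerSketch {
    // A MockConsumer that simulates a broker that never answers offset-commit lookups.
    static MockConsumer<byte[], byte[]> disconnectedConsumer() {
        return new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
            @Override
            public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
                throw new TimeoutException("KABOOM!");
            }
        };
    }

    public static void main(final String[] args) {
        try {
            disconnectedConsumer().committed(Collections.singleton(new TopicPartition("topic1", 0)));
        } catch (final TimeoutException expected) {
            System.out.println("caught: " + expected.getMessage());
        }
    }
}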
Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
The class GlobalStreamThreadTest, method before.
@Before
public void before() {
final MaterializedInternal<Object, Object, KeyValueStore<Bytes, byte[]>> materialized = new MaterializedInternal<>(Materialized.with(null, null), new InternalNameProvider() {
@Override
public String newProcessorName(final String prefix) {
return "processorName";
}
@Override
public String newStoreName(final String prefix) {
return GLOBAL_STORE_NAME;
}
}, "store-");
final ProcessorSupplier<Object, Object, Void, Void> processorSupplier = () -> new ContextualProcessor<Object, Object, Void, Void>() {
@Override
public void process(final Record<Object, Object> record) {
}
};
builder.addGlobalStore(new TimestampedKeyValueStoreMaterializer<>(materialized).materialize().withLoggingDisabled(), "sourceName", null, null, null, GLOBAL_STORE_TOPIC_NAME, "processorName", processorSupplier);
baseDirectoryName = TestUtils.tempDirectory().getAbsolutePath();
final HashMap<String, Object> properties = new HashMap<>();
properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "blah");
properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "testAppId");
properties.put(StreamsConfig.STATE_DIR_CONFIG, baseDirectoryName);
properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
config = new StreamsConfig(properties);
globalStreamThread = new GlobalStreamThread(builder.rewriteTopology(config).buildGlobalStateTopology(), config, mockConsumer, new StateDirectory(config, time, true, false), 0, new StreamsMetricsImpl(new Metrics(), "test-client", StreamsConfig.METRICS_LATEST, time), time, "clientId", stateRestoreListener, e -> {
});
}
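Before a GlobalStreamThread built like this can restore its global store, the mockConsumer handed to it typically needs partition metadata and begin/end offsets for the global topic. A hedged sketch of that preparation (topic name, partition count and offsets are assumptions, not taken from the test above):
import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class GlobalConsumerSetupSketch {
    public static void main(final String[] args) {
        final String topic = "global-topic";
        final TopicPartition tp = new TopicPartition(topic, 0);
        final MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        // Expose partition metadata so the global consumer can discover the topic,
        // and bound the restore phase by giving the partition explicit begin/end offsets.
        mockConsumer.updatePartitions(topic, Collections.singletonList(new PartitionInfo(topic, 0, null, null, null)));
        mockConsumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        mockConsumer.updateEndOffsets(Collections.singletonMap(tp, 0L));
        System.out.println("prepared " + mockConsumer.partitionsFor(topic).size() + " partition(s) for " + topic);
    }
}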
Use of org.apache.kafka.clients.consumer.MockConsumer in project beam by apache.
The class KafkaIOTest, method mkMockConsumer.
// Update the mock consumer with records distributed among the given topics, each with the given
// number of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(List<String> topics, int partitionsPerTopic, int numElements, OffsetResetStrategy offsetResetStrategy, Map<String, Object> config, SerializableFunction<Integer, byte[]> keyFunction, SerializableFunction<Integer, byte[]> valueFunction) {
final List<TopicPartition> partitions = new ArrayList<>();
final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
for (String topic : topics) {
List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
for (int i = 0; i < partitionsPerTopic; i++) {
TopicPartition tp = new TopicPartition(topic, i);
partitions.add(tp);
partIds.add(new PartitionInfo(topic, i, null, null, null));
records.put(tp, new ArrayList<>());
}
partitionMap.put(topic, partIds);
}
int numPartitions = partitions.size();
final long[] offsets = new long[numPartitions];
long timestampStartMillis = (Long) config.getOrDefault(TIMESTAMP_START_MILLIS_CONFIG, LOG_APPEND_START_TIME.getMillis());
TimestampType timestampType = TimestampType.forName((String) config.getOrDefault(TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.toString()));
for (int i = 0; i < numElements; i++) {
int pIdx = i % numPartitions;
TopicPartition tp = partitions.get(pIdx);
byte[] key = keyFunction.apply(i);
byte[] value = valueFunction.apply(i);
records.get(tp).add(new ConsumerRecord<>(tp.topic(), tp.partition(), offsets[pIdx]++, timestampStartMillis + Duration.standardSeconds(i).getMillis(), timestampType, 0, key.length, value.length, key, value));
}
// This is updated when the reader assigns partitions.
final AtomicReference<List<TopicPartition>> assignedPartitions = new AtomicReference<>(Collections.<TopicPartition>emptyList());
final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
@Override
public synchronized void assign(final Collection<TopicPartition> assigned) {
super.assign(assigned);
assignedPartitions.set(ImmutableList.copyOf(assigned));
for (TopicPartition tp : assigned) {
updateBeginningOffsets(ImmutableMap.of(tp, 0L));
updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
}
}
// Override offsetsForTimes() in order to look up the offsets by timestamp.
@Override
public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
return timestampsToSearch.entrySet().stream().map(e -> {
// In test scope, timestamp == offset.
long maxOffset = offsets[partitions.indexOf(e.getKey())];
long offset = e.getValue();
OffsetAndTimestamp value = (offset >= maxOffset) ? null : new OffsetAndTimestamp(offset, offset);
return new SimpleEntry<>(e.getKey(), value);
}).collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
}
};
for (String topic : topics) {
consumer.updatePartitions(topic, partitionMap.get(topic));
}
// MockConsumer does not maintain any relationship between the partition seek position and the
// records added. E.g. if we add 10 records to a partition and then seek to the end of the
// partition, MockConsumer will still return those 10 records in the next poll. It is
// our responsibility to keep the currently enqueued records in sync with the partition offsets.
// The following task is invoked inside each call to MockConsumer.poll().
// We enqueue only the records with offset >= the partition's current position.
Runnable recordEnqueueTask = new Runnable() {
@Override
public void run() {
// add all the records with offset >= current partition position.
int recordsAdded = 0;
for (TopicPartition tp : assignedPartitions.get()) {
long curPos = consumer.position(tp);
for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
if (r.offset() >= curPos) {
consumer.addRecord(r);
recordsAdded++;
}
}
}
if (recordsAdded == 0) {
if (config.get("inject.error.at.eof") != null) {
consumer.setException(new KafkaException("Injected error in consumer.poll()"));
}
// MockConsumer.poll(timeout) does not actually wait even when there aren't any
// records.
// Add a small wait here in order to avoid busy looping in the reader.
Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
// TODO: BEAM-4086: testUnboundedSourceWithoutBoundedWrapper() occasionally hangs
// without this wait. Need to look into it.
}
consumer.schedulePollTask(this);
}
};
consumer.schedulePollTask(recordEnqueueTask);
return consumer;
}
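The comment block above is the heart of this helper: MockConsumer never reconciles enqueued records with seek positions, so the test re-enqueues eligible records on every poll through schedulePollTask. A trimmed-down, self-contained sketch of that self-rescheduling pattern (topic, offsets and record payloads are illustrative):
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class SchedulePollTaskSketch {
    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("topic", 0);
        final List<ConsumerRecord<byte[], byte[]>> stored = Collections.singletonList(
            new ConsumerRecord<>("topic", 0, 0L, new byte[0], new byte[0]));
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        // Self-rescheduling task: before each poll(), enqueue every stored record whose
        // offset is at or past the consumer's current position, then re-register itself.
        final Runnable enqueueTask = new Runnable() {
            @Override
            public void run() {
                final long position = consumer.position(tp);
                for (final ConsumerRecord<byte[], byte[]> r : stored) {
                    if (r.offset() >= position) {
                        consumer.addRecord(r);
                    }
                }
                consumer.schedulePollTask(this);
            }
        };
        consumer.schedulePollTask(enqueueTask);
        System.out.println("first poll returned " + consumer.poll(Duration.ZERO).count() + " record(s)");
        System.out.println("second poll returned " + consumer.poll(Duration.ZERO).count() + " record(s)");
    }
}
The first poll returns the enqueued record and advances the position past it, so the second poll adds nothing new, which is the invariant the full helper maintains across many partitions.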
Use of org.apache.kafka.clients.consumer.MockConsumer in project beam by apache.
The class KafkaTestTable, method mkMockConsumer.
private MockConsumer<byte[], byte[]> mkMockConsumer(Map<String, Object> config) {
OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.EARLIEST;
final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> kafkaRecords = new HashMap<>();
Map<String, List<PartitionInfo>> partitionInfoMap = new HashMap<>();
Map<String, List<TopicPartition>> partitionMap = new HashMap<>();
// Create topic partitions
for (String topic : this.getTopics()) {
List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
List<TopicPartition> topicPartitions = new ArrayList<>(partitionsPerTopic);
for (int i = 0; i < partitionsPerTopic; i++) {
TopicPartition tp = new TopicPartition(topic, i);
topicPartitions.add(tp);
partIds.add(new PartitionInfo(topic, i, null, null, null));
kafkaRecords.put(tp, new ArrayList<>());
}
partitionInfoMap.put(topic, partIds);
partitionMap.put(topic, topicPartitions);
}
TimestampType timestampType = TimestampType.forName((String) config.getOrDefault(TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.toString()));
for (KafkaTestRecord record : this.records) {
int partitionIndex = record.getKey().hashCode() % partitionsPerTopic;
TopicPartition tp = partitionMap.get(record.getTopic()).get(partitionIndex);
byte[] key = record.getKey().getBytes(UTF_8);
byte[] value = record.getValue().toByteArray();
kafkaRecords.get(tp).add(new ConsumerRecord<>(tp.topic(), tp.partition(), kafkaRecords.get(tp).size(), record.getTimeStamp(), timestampType, 0, key.length, value.length, key, value));
}
// This is updated when the reader assigns partitions.
final AtomicReference<List<TopicPartition>> assignedPartitions = new AtomicReference<>(Collections.emptyList());
final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
@Override
public synchronized void assign(final Collection<TopicPartition> assigned) {
Collection<TopicPartition> realPartitions = assigned.stream().map(part -> partitionMap.get(part.topic()).get(part.partition())).collect(Collectors.toList());
super.assign(realPartitions);
assignedPartitions.set(ImmutableList.copyOf(realPartitions));
for (TopicPartition tp : realPartitions) {
updateBeginningOffsets(ImmutableMap.of(tp, 0L));
updateEndOffsets(ImmutableMap.of(tp, (long) kafkaRecords.get(tp).size()));
}
}
// Override offsetsForTimes() in order to look up the offsets by timestamp.
@Override
public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
return timestampsToSearch.entrySet().stream().map(e -> {
// In test scope, timestamp == offset.
long maxOffset = kafkaRecords.get(e.getKey()).size();
long offset = e.getValue();
OffsetAndTimestamp value = (offset >= maxOffset) ? null : new OffsetAndTimestamp(offset, offset);
return new AbstractMap.SimpleEntry<>(e.getKey(), value);
}).collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey, AbstractMap.SimpleEntry::getValue));
}
};
for (String topic : getTopics()) {
consumer.updatePartitions(topic, partitionInfoMap.get(topic));
}
Runnable recordEnqueueTask = new Runnable() {
@Override
public void run() {
// add all the records with offset >= current partition position.
int recordsAdded = 0;
for (TopicPartition tp : assignedPartitions.get()) {
long curPos = consumer.position(tp);
for (ConsumerRecord<byte[], byte[]> r : kafkaRecords.get(tp)) {
if (r.offset() >= curPos) {
consumer.addRecord(r);
recordsAdded++;
}
}
}
if (recordsAdded == 0) {
if (config.get("inject.error.at.eof") != null) {
consumer.setException(new KafkaException("Injected error in consumer.poll()"));
}
// MockConsumer.poll(timeout) does not actually wait even when there aren't any
// records.
// Add a small wait here in order to avoid busy looping in the reader.
Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
}
consumer.schedulePollTask(this);
}
};
consumer.schedulePollTask(recordEnqueueTask);
return consumer;
}
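One detail worth noting in the loop above: partitionIndex is computed as record.getKey().hashCode() % partitionsPerTopic, and Java's % keeps the sign of the dividend, so a key with a negative hash code would produce a negative index. The test data presumably avoids that, but a general-purpose variant would use Math.floorMod; a small illustration with an explicitly negative hash value (the values are made up for demonstration):
public class PartitionIndexSketch {
    public static void main(final String[] args) {
        final int partitionsPerTopic = 3;
        final int negativeHash = -7;                                         // e.g. a key whose hashCode() is negative
        final int raw = negativeHash % partitionsPerTopic;                   // -1: would index out of bounds
        final int safe = Math.floorMod(negativeHash, partitionsPerTopic);    // 2: always in [0, partitionsPerTopic)
        System.out.println("raw index: " + raw + ", floorMod index: " + safe);
    }
}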