Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
The class GlobalStateManagerImplTest, method shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore:
@Test
public void shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore() {
    // A MockConsumer whose poll() advances the mock clock by the full timeout,
    // simulating a consumer that makes no progress during restore.
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized ConsumerRecords<byte[], byte[]> poll(final Duration timeout) {
            time.sleep(timeout.toMillis());
            return super.poll(timeout);
        }
    };
    final HashMap<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(t1, 1L);
    final HashMap<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(t1, 3L);
    consumer.updatePartitions(t1.topic(), Collections.singletonList(new PartitionInfo(t1.topic(), t1.partition(), null, null, null)));
    consumer.assign(Collections.singletonList(t1));
    consumer.updateBeginningOffsets(startOffsets);
    consumer.updateEndOffsets(endOffsets);
    streamsConfig = new StreamsConfig(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath())
    ));
    stateManager = new GlobalStateManagerImpl(new LogContext("mock"), time, topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);
    final long startTime = time.milliseconds();
    // Without progress, initialize() must give up after task.timeout.ms.
    final TimeoutException exception = assertThrows(TimeoutException.class, () -> stateManager.initialize());
    assertThat(exception.getMessage(), equalTo("Global task did not make progress to restore state within 301000 ms. Adjust `task.timeout.ms` if needed."));
    assertThat(time.milliseconds() - startTime, equalTo(331_100L));
}
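The test can subclass MockConsumer like this because MockConsumer returns only what the test queues up; no broker is involved. As a reference for the ConsumerRecords it hands back, here is a minimal self-contained sketch of the usual queue-then-poll pattern, assuming only kafka-clients on the classpath (the class name, topic name, key, and value are illustrative, not from the test above):

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerPollSketch {
    public static void main(final String[] args) {
        final MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        final TopicPartition tp = new TopicPartition("demo-topic", 0);
        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        // Records queued via addRecord() are what poll() returns as ConsumerRecords.
        consumer.addRecord(new ConsumerRecord<>("demo-topic", 0, 0L, "key", "value"));
        final ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        System.out.println(records.count()); // prints 1
    }
}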
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project incubator-atlas by apache.
The class KafkaConsumerTest, method testNextVersionMismatch:
@Test
public void testNextVersionMismatch() throws Exception {
    MessageAndMetadata<String, String> messageAndMetadata = mock(MessageAndMetadata.class);
    Referenceable entity = getEntity(TRAIT_NAME);
    HookNotification.EntityUpdateRequest message = new HookNotification.EntityUpdateRequest("user1", entity);
    // Serialize the message with a version the consumer does not understand.
    String json = AbstractNotification.GSON.toJson(new VersionedMessage<>(new MessageVersion("2.0.0"), message));
    kafkaConsumer.assign(Arrays.asList(new TopicPartition("ATLAS_HOOK", 0)));
    List<ConsumerRecord<String, String>> klist = new ArrayList<>();
    klist.add(new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", json));
    TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> mp = new HashMap<>();
    mp.put(tp, klist);
    ConsumerRecords<String, String> records = new ConsumerRecords<>(mp);
    when(kafkaConsumer.poll(100L)).thenReturn(records);
    when(messageAndMetadata.message()).thenReturn(json);
    AtlasKafkaConsumer consumer = new AtlasKafkaConsumer(NotificationInterface.NotificationType.HOOK.getDeserializer(), kafkaConsumer, false, 100L);
    try {
        List<AtlasKafkaMessage<HookNotification.HookNotificationMessage>> messageList = consumer.receive();
        assertTrue(messageList.size() > 0);
        // Deserializing a 2.0.0 message must fail.
        messageList.get(0).getMessage();
        fail("Expected IncompatibleVersionException!");
    } catch (IncompatibleVersionException e) {
        // expected
    }
}
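The stubbing pattern above, hand-building a ConsumerRecords and returning it from a mocked poll(), is reusable in any consumer-side unit test. A minimal self-contained sketch with Mockito, assuming the pre-2.0 kafka-clients poll(long) signature this test relies on (the class name, topic name, and payload are illustrative):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PollStubSketch {
    @SuppressWarnings("unchecked")
    public static void main(final String[] args) {
        final KafkaConsumer<String, String> kafkaConsumer = mock(KafkaConsumer.class);
        final TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
        final ConsumerRecords<String, String> records = new ConsumerRecords<>(
                Collections.singletonMap(tp, Collections.singletonList(
                        new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", "payload"))));
        // Every poll(100L) now yields the hand-built batch.
        when(kafkaConsumer.poll(100L)).thenReturn(records);
        System.out.println(kafkaConsumer.poll(100L).count()); // prints 1
    }
}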
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project incubator-atlas by apache.
The class KafkaConsumerTest, method testReceive:
@Test
public void testReceive() throws Exception {
    MessageAndMetadata<String, String> messageAndMetadata = mock(MessageAndMetadata.class);
    Referenceable entity = getEntity(TRAIT_NAME);
    HookNotification.EntityUpdateRequest message = new HookNotification.EntityUpdateRequest("user1", entity);
    // Serialize the message with a version the consumer accepts.
    String json = AbstractNotification.GSON.toJson(new VersionedMessage<>(new MessageVersion("1.0.0"), message));
    kafkaConsumer.assign(Arrays.asList(new TopicPartition("ATLAS_HOOK", 0)));
    List<ConsumerRecord<String, String>> klist = new ArrayList<>();
    klist.add(new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", json));
    TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> mp = new HashMap<>();
    mp.put(tp, klist);
    ConsumerRecords<String, String> records = new ConsumerRecords<>(mp);
    when(kafkaConsumer.poll(100L)).thenReturn(records);
    when(messageAndMetadata.message()).thenReturn(json);
    AtlasKafkaConsumer consumer = new AtlasKafkaConsumer(NotificationInterface.NotificationType.HOOK.getDeserializer(), kafkaConsumer, false, 100L);
    List<AtlasKafkaMessage<HookNotification.HookNotificationMessage>> messageList = consumer.receive();
    assertTrue(messageList.size() > 0);
    HookNotification.HookNotificationMessage consumedMessage = messageList.get(0).getMessage();
    // A 1.0.0 message deserializes cleanly and matches what was produced.
    assertMessagesEqual(message, consumedMessage, entity);
}
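Both Atlas tests hand a ConsumerRecords instance straight to the code under test. For reference, a minimal sketch of how such an instance is typically read back, assuming the classic map-based constructor used above (the class name, topic name, and payload are illustrative):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class ConsumerRecordsReadSketch {
    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
        final Map<TopicPartition, List<ConsumerRecord<String, String>>> data =
                Collections.singletonMap(tp, Collections.singletonList(
                        new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", "{}")));
        final ConsumerRecords<String, String> records = new ConsumerRecords<>(data);
        // Iterate every record across all partitions...
        for (final ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ": " + record.value());
        }
        // ...or take the per-partition view and the total count.
        final List<ConsumerRecord<String, String>> forPartition = records.records(tp);
        System.out.println(forPartition.size() + " of " + records.count());
    }
}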