
Example 56 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.

From class GlobalStateManagerImplTest, method shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore.

@Test
public void shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore() {
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public synchronized ConsumerRecords<byte[], byte[]> poll(final Duration timeout) {
            // Advance the mock clock by the full poll timeout so the restore loop
            // observes time passing even though no records ever arrive.
            time.sleep(timeout.toMillis());
            return super.poll(timeout);
        }
    };
    final HashMap<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(t1, 1L);
    final HashMap<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(t1, 3L);
    consumer.updatePartitions(t1.topic(), Collections.singletonList(new PartitionInfo(t1.topic(), t1.partition(), null, null, null)));
    consumer.assign(Collections.singletonList(t1));
    consumer.updateBeginningOffsets(startOffsets);
    consumer.updateEndOffsets(endOffsets);
    streamsConfig = new StreamsConfig(mkMap(mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"), mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"), mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath())));
    stateManager = new GlobalStateManagerImpl(new LogContext("mock"), time, topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);
    final long startTime = time.milliseconds();
    final TimeoutException exception = assertThrows(TimeoutException.class, () -> stateManager.initialize());
    assertThat(exception.getMessage(), equalTo("Global task did not make progress to restore state within 301000 ms. Adjust `task.timeout.ms` if needed."));
    assertThat(time.milliseconds() - startTime, equalTo(331_100L));
}
Also used: HashMap(java.util.HashMap) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo) StreamsConfig(org.apache.kafka.streams.StreamsConfig) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Test(org.junit.Test)
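
The key trick in this test is the overridden MockConsumer.poll, which advances a mock clock by the poll timeout instead of blocking, so deadline handling can be exercised without real waiting. Below is a minimal, self-contained sketch of the same idea; the NoProgressRestoreSketch class, the AtomicLong clock (standing in for the MockTime used by the test), the restore loop, and the 30-second deadline are illustrative assumptions, not the actual GlobalStateManagerImpl logic.

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;

public class NoProgressRestoreSketch {

    public static void main(String[] args) {
        // Stand-in for the MockTime used by the test: a clock that only moves when told to.
        final AtomicLong clock = new AtomicLong(0L);

        // A MockConsumer whose poll() advances the fake clock instead of blocking,
        // mirroring the override in the test above.
        final MockConsumer<byte[], byte[]> consumer =
                new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
                    @Override
                    public synchronized ConsumerRecords<byte[], byte[]> poll(final Duration timeout) {
                        clock.addAndGet(timeout.toMillis());
                        return super.poll(timeout);
                    }
                };

        final TopicPartition tp = new TopicPartition("global-topic", 0);
        consumer.assign(Collections.singletonList(tp));
        final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
        beginningOffsets.put(tp, 0L);
        consumer.updateBeginningOffsets(beginningOffsets);

        // Hypothetical restore loop with a deadline: no records ever arrive, the fake
        // clock jumps forward on every poll, and the loop gives up with a TimeoutException.
        final long taskTimeoutMs = 30_000L; // illustrative stand-in for task.timeout.ms
        final long deadline = clock.get() + taskTimeoutMs;
        try {
            while (true) {
                final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1_000L));
                if (!records.isEmpty()) {
                    break; // progress; a real restore would apply the records here
                }
                if (clock.get() >= deadline) {
                    throw new TimeoutException("no restore progress within " + taskTimeoutMs + " ms");
                }
            }
        } catch (final TimeoutException e) {
            System.out.println("timed out as expected: " + e.getMessage());
        }
    }
}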

Example 57 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project incubator-atlas by apache.

From class KafkaConsumerTest, method testNextVersionMismatch.

@Test
public void testNextVersionMismatch() throws Exception {
    MessageAndMetadata<String, String> messageAndMetadata = mock(MessageAndMetadata.class);
    Referenceable entity = getEntity(TRAIT_NAME);
    HookNotification.EntityUpdateRequest message = new HookNotification.EntityUpdateRequest("user1", entity);
    String json = AbstractNotification.GSON.toJson(new VersionedMessage<>(new MessageVersion("2.0.0"), message));
    kafkaConsumer.assign(Arrays.asList(new TopicPartition("ATLAS_HOOK", 0)));
    List<ConsumerRecord<String, String>> klist = new ArrayList<>();
    klist.add(new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", json));
    TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> mp = new HashMap<>();
    mp.put(tp, klist);
    ConsumerRecords<String, String> records = new ConsumerRecords<>(mp);
    when(kafkaConsumer.poll(100L)).thenReturn(records);
    when(messageAndMetadata.message()).thenReturn(json);
    AtlasKafkaConsumer consumer = new AtlasKafkaConsumer(NotificationInterface.NotificationType.HOOK.getDeserializer(), kafkaConsumer, false, 100L);
    try {
        List<AtlasKafkaMessage<HookNotification.HookNotificationMessage>> messageList = consumer.receive();
        assertTrue(messageList.size() > 0);
        HookNotification.HookNotificationMessage consumedMessage = messageList.get(0).getMessage();
        fail("Expected VersionMismatchException!");
    } catch (IncompatibleVersionException e) {
        e.printStackTrace();
    }
}
Also used: MessageVersion(org.apache.atlas.notification.MessageVersion) HashMap(java.util.HashMap) IncompatibleVersionException(org.apache.atlas.notification.IncompatibleVersionException) ArrayList(java.util.ArrayList) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) HookNotification(org.apache.atlas.notification.hook.HookNotification) Referenceable(org.apache.atlas.typesystem.Referenceable) TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) Test(org.testng.annotations.Test) EntityNotificationImplTest(org.apache.atlas.notification.entity.EntityNotificationImplTest)
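
Both Atlas tests lean on the fact that ConsumerRecords has a public constructor taking a Map<TopicPartition, List<ConsumerRecord<K, V>>>, so a fully populated batch can be handed to a Mockito-stubbed consumer without any broker. A small sketch of that stubbing in isolation follows; the MockPollSketch class, topic name, key, and JSON payload are placeholders, it stubs the same legacy poll(long) overload the test calls, and returning ConsumerRecords.empty() on later polls keeps a receive loop from re-reading the same batch.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class MockPollSketch {

    @SuppressWarnings("unchecked")
    public static KafkaConsumer<String, String> stubbedConsumer() {
        // Hand-build a single-partition batch with one record.
        TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
        List<ConsumerRecord<String, String>> partitionRecords = Collections.singletonList(
                new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", "{\"payload\":\"...\"}"));
        Map<TopicPartition, List<ConsumerRecord<String, String>>> recordsByPartition = new HashMap<>();
        recordsByPartition.put(tp, partitionRecords);
        ConsumerRecords<String, String> batch = new ConsumerRecords<>(recordsByPartition);

        // First poll returns the batch, every later poll returns an empty batch.
        KafkaConsumer<String, String> consumer = mock(KafkaConsumer.class);
        when(consumer.poll(100L)).thenReturn(batch, ConsumerRecords.<String, String>empty());
        return consumer;
    }
}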

Example 58 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project incubator-atlas by apache.

From class KafkaConsumerTest, method testReceive.

@Test
public void testReceive() throws Exception {
    MessageAndMetadata<String, String> messageAndMetadata = mock(MessageAndMetadata.class);
    Referenceable entity = getEntity(TRAIT_NAME);
    HookNotification.EntityUpdateRequest message = new HookNotification.EntityUpdateRequest("user1", entity);
    String json = AbstractNotification.GSON.toJson(new VersionedMessage<>(new MessageVersion("1.0.0"), message));
    kafkaConsumer.assign(Arrays.asList(new TopicPartition("ATLAS_HOOK", 0)));
    List<ConsumerRecord<String, String>> klist = new ArrayList<>();
    klist.add(new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", json));
    TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> mp = new HashMap<>();
    mp.put(tp, klist);
    ConsumerRecords<String, String> records = new ConsumerRecords<>(mp);
    when(kafkaConsumer.poll(100)).thenReturn(records);
    when(messageAndMetadata.message()).thenReturn(json);
    AtlasKafkaConsumer consumer = new AtlasKafkaConsumer(NotificationInterface.NotificationType.HOOK.getDeserializer(), kafkaConsumer, false, 100L);
    List<AtlasKafkaMessage<HookNotification.HookNotificationMessage>> messageList = consumer.receive();
    assertTrue(messageList.size() > 0);
    HookNotification.HookNotificationMessage consumedMessage = messageList.get(0).getMessage();
    assertMessagesEqual(message, consumedMessage, entity);
}
Also used: MessageVersion(org.apache.atlas.notification.MessageVersion) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) HookNotification(org.apache.atlas.notification.hook.HookNotification) Referenceable(org.apache.atlas.typesystem.Referenceable) TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) Test(org.testng.annotations.Test) EntityNotificationImplTest(org.apache.atlas.notification.entity.EntityNotificationImplTest)
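
What a receive()-style method does with such a batch can be sketched generically: poll once, walk the ConsumerRecords, and keep topic, partition, and offset alongside each value. The sketch below is a simplified stand-in for AtlasKafkaConsumer.receive(), not its actual implementation; the ReceiveSketch class and ReceivedMessage holder are invented for the illustration, and it uses the newer Duration-based poll overload.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

public class ReceiveSketch {

    /** Minimal holder for a consumed payload plus the position it came from. */
    public static final class ReceivedMessage {
        public final String topic;
        public final int partition;
        public final long offset;
        public final String payload;

        ReceivedMessage(String topic, int partition, long offset, String payload) {
            this.topic = topic;
            this.partition = partition;
            this.offset = offset;
            this.payload = payload;
        }
    }

    /** Poll once and flatten the batch, preserving offsets for later commit or replay. */
    public static List<ReceivedMessage> receive(Consumer<String, String> consumer, long pollTimeoutMs) {
        ConsumerRecords<String, String> batch = consumer.poll(Duration.ofMillis(pollTimeoutMs));
        List<ReceivedMessage> messages = new ArrayList<>();
        for (ConsumerRecord<String, String> record : batch) {
            messages.add(new ReceivedMessage(record.topic(), record.partition(), record.offset(), record.value()));
        }
        return messages;
    }
}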

Aggregations

ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 58 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 48 usages
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 40 usages
HashMap (java.util.HashMap): 33 usages
ArrayList (java.util.ArrayList): 29 usages
List (java.util.List): 24 usages
Test (org.junit.Test): 24 usages
Map (java.util.Map): 19 usages
Properties (java.util.Properties): 14 usages
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 13 usages
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 12 usages
Collection (java.util.Collection): 10 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 9 usages
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 9 usages
Collections (java.util.Collections): 8 usages
Duration (java.time.Duration): 7 usages
HashSet (java.util.HashSet): 7 usages
Collectors (java.util.stream.Collectors): 7 usages
Set (java.util.Set): 6 usages
UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup): 5 usages
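
These counts reflect classes that almost always appear together in consumer code: a KafkaConsumer returns ConsumerRecords, which are iterated as ConsumerRecord instances and committed per TopicPartition with OffsetAndMetadata. A conventional poll-and-commit loop tying them together is sketched below; the broker address, group id, and topic name are placeholders.

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PollAndCommitLoop {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));   // placeholder topic
            while (true) {                                                    // loop until interrupted
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
                for (ConsumerRecord<String, String> record : records) {
                    // Process the record, then remember the next offset to commit for its partition.
                    System.out.printf("%s-%d@%d: %s%n",
                            record.topic(), record.partition(), record.offset(), record.value());
                    toCommit.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1));
                }
                if (!toCommit.isEmpty()) {
                    consumer.commitSync(toCommit);
                }
            }
        }
    }
}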