Example 71 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project flink by apache.

the class Kafka09FetcherTest method testCancellationWhenEmitBlocks.

@Test
public void testCancellationWhenEmitBlocks() throws Exception {
    // ----- some test data -----
    final String topic = "test-topic";
    final int partition = 3;
    final byte[] payload = new byte[] { 1, 2, 3, 4 };
    final List<ConsumerRecord<byte[], byte[]>> records = Arrays.asList(
            new ConsumerRecord<byte[], byte[]>(topic, partition, 15, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 16, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 17, payload, payload));
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> data = new HashMap<>();
    data.put(new TopicPartition(topic, partition), records);
    final ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(data);
    // ----- the test consumer -----
    final KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) {
            return consumerRecords;
        }
    });
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- build a fetcher -----
    BlockingSourceContext<String> sourceContext = new BlockingSourceContext<>();
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition(topic, partition), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(),
            10, /* watermark interval */
            this.getClass().getClassLoader(),
            "task_name",
            new UnregisteredMetricsGroup(),
            schema,
            new Properties(),
            0L,
            false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // wait until the thread started to emit records to the source context
    sourceContext.waitTillHasBlocker();
    // now we try to cancel the fetcher, including the interruption usually done on the task thread
    // once it has finished, there must be no more thread blocked on the source context
    fetcher.cancel();
    fetcherRunner.interrupt();
    fetcherRunner.join();
    assertFalse("fetcher threads did not properly finish", sourceContext.isStillBlocking());
}
Also used : UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) HashMap(java.util.HashMap) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) Properties(java.util.Properties) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) KeyedDeserializationSchemaWrapper(org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper) List(java.util.List) AtomicReference(java.util.concurrent.atomic.AtomicReference) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) KafkaConsumerThread(org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread) Kafka09Fetcher(org.apache.flink.streaming.connectors.kafka.internal.Kafka09Fetcher) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Mockito.anyLong(org.mockito.Mockito.anyLong) SimpleStringSchema(org.apache.flink.streaming.util.serialization.SimpleStringSchema) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
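
What this test exercises is cancellation of a fetch loop whose emit call blocks: cancel() flips a flag, the task thread is interrupted, and join() must return with no thread left blocked on the source context. A minimal, Flink-independent sketch of that shutdown sequence (the class name and the SynchronousQueue standing in for the blocking source context are illustrative, not Flink's implementation):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class CancellationSketch {

    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);
        // nobody ever takes from this queue, so put() blocks, like the blocked emit
        final SynchronousQueue<String> sink = new SynchronousQueue<>();
        Thread worker = new Thread("fetcher runner") {
            @Override
            public void run() {
                try {
                    while (running.get()) {
                        sink.put("record");
                    }
                } catch (InterruptedException e) {
                    // expected: interruption is what unblocks the emit during cancellation
                }
            }
        };
        worker.start();
        Thread.sleep(100);   // let the worker reach the blocking put()
        running.set(false);  // mirrors fetcher.cancel()
        worker.interrupt();  // mirrors the task-thread interrupt
        worker.join();       // must return: no thread may stay blocked
        System.out.println("worker finished: " + !worker.isAlive());
    }
}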

Example 72 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project open-kilda by telstra.

the class KafkaUtils method getStateDumpsFromBolts.

public DumpStateManager getStateDumpsFromBolts() {
    long timestamp = System.currentTimeMillis();
    String correlationId = String.format("atdd-%d", timestamp);
    CtrlRequest dumpRequest = new CtrlRequest("*", new RequestData("dump"), timestamp, correlationId, WFM_CTRL);
    try {
        RecordMetadata postedMessage = postMessage(settings.getControlTopic(), dumpRequest);
        KafkaConsumer<String, String> consumer = createConsumer();
        try {
            consumer.subscribe(Collections.singletonList(settings.getControlTopic()), new NoOpConsumerRebalanceListener() {

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    System.out.println("Seek to offset: " + postedMessage.offset());
                    for (TopicPartition topicPartition : partitions) {
                        consumer.seek(topicPartition, postedMessage.offset());
                    }
                }
            });
            List<CtrlResponse> buffer = new ArrayList<>();
            final int BOLT_COUNT = 4;
            final int NUMBER_OF_ATTEMPTS = 5;
            int attempt = 0;
            while (buffer.size() < BOLT_COUNT && attempt++ < NUMBER_OF_ATTEMPTS) {
                for (ConsumerRecord<String, String> record : consumer.poll(1000)) {
                    System.out.println("Received message: (" + record.key() + ", " + record.value() + ") at offset " + record.offset());
                    Message message = MAPPER.readValue(record.value(), Message.class);
                    if (message.getDestination() == CTRL_CLIENT && message.getCorrelationId().equals(correlationId)) {
                        buffer.add((CtrlResponse) message);
                    }
                }
            }
            return DumpStateManager.fromResponsesList(buffer);
        } finally {
            consumer.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}
Also used : Message(org.openkilda.messaging.Message) CtrlRequest(org.openkilda.messaging.ctrl.CtrlRequest) ArrayList(java.util.ArrayList) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) CtrlResponse(org.openkilda.messaging.ctrl.CtrlResponse) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) RequestData(org.openkilda.messaging.ctrl.RequestData) TopicPartition(org.apache.kafka.common.TopicPartition) NoOpConsumerRebalanceListener(org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener)
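
The essential detail here is that seek() is only valid once partitions have actually been assigned, which is why it happens inside onPartitionsAssigned. A standalone sketch of the same replay-from-offset pattern, using the public ConsumerRebalanceListener rather than the internal NoOpConsumerRebalanceListener (the method name and parameters are illustrative):

import java.util.Collection;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

static void subscribeAndReplayFrom(final KafkaConsumer<String, String> consumer,
                                   final String topic, final long replayOffset) {
    consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // seek() is only legal for assigned partitions, i.e. inside this callback
            for (TopicPartition tp : partitions) {
                consumer.seek(tp, replayOffset);
            }
        }

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // nothing to restore on revocation in this replay scenario
        }
    });
}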

Example 73 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project nifi by apache.

the class ConsumerLease method writeData.

private void writeData(final ProcessSession session, ConsumerRecord<byte[], byte[]> record, final TopicPartition topicPartition) {
    FlowFile flowFile = session.create();
    final BundleTracker tracker = new BundleTracker(record, topicPartition, keyEncoding);
    tracker.incrementRecordCount(1);
    final byte[] value = record.value();
    if (value != null) {
        flowFile = session.write(flowFile, out -> {
            out.write(value);
        });
    }
    flowFile = session.putAllAttributes(flowFile, getAttributes(record));
    tracker.updateFlowFile(flowFile);
    populateAttributes(tracker);
    session.transfer(tracker.flowFile, REL_SUCCESS);
}
Also used : KafkaException(org.apache.kafka.common.KafkaException) HashMap(java.util.HashMap) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ComponentLog(org.apache.nifi.logging.ComponentLog) ProcessException(org.apache.nifi.processor.exception.ProcessException) ArrayList(java.util.ArrayList) UTF8_ENCODING(org.apache.nifi.processors.kafka.pubsub.KafkaProcessorUtils.UTF8_ENCODING) RecordSchema(org.apache.nifi.serialization.record.RecordSchema) ByteArrayInputStream(java.io.ByteArrayInputStream) Charset(java.nio.charset.Charset) RecordReader(org.apache.nifi.serialization.RecordReader) Map(java.util.Map) Record(org.apache.nifi.serialization.record.Record) OutputStream(java.io.OutputStream) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) HEX_ENCODING(org.apache.nifi.processors.kafka.pubsub.KafkaProcessorUtils.HEX_ENCODING) FlowFile(org.apache.nifi.flowfile.FlowFile) Collection(java.util.Collection) WriteResult(org.apache.nifi.serialization.WriteResult) REL_PARSE_FAILURE(org.apache.nifi.processors.kafka.pubsub.ConsumeKafkaRecord_1_0.REL_PARSE_FAILURE) IOException(java.io.IOException) ProcessSession(org.apache.nifi.processor.ProcessSession) RecordSetWriterFactory(org.apache.nifi.serialization.RecordSetWriterFactory) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) Objects(java.util.Objects) TimeUnit(java.util.concurrent.TimeUnit) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) RecordReaderFactory(org.apache.nifi.serialization.RecordReaderFactory) REL_SUCCESS(org.apache.nifi.processors.kafka.pubsub.ConsumeKafkaRecord_1_0.REL_SUCCESS) Header(org.apache.kafka.common.header.Header) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Closeable(java.io.Closeable) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Pattern(java.util.regex.Pattern) CoreAttributes(org.apache.nifi.flowfile.attributes.CoreAttributes) DatatypeConverter(javax.xml.bind.DatatypeConverter) RecordSetWriter(org.apache.nifi.serialization.RecordSetWriter) InputStream(java.io.InputStream) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) FlowFile(org.apache.nifi.flowfile.FlowFile)
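
One detail worth noting is the null check on record.value(): Kafka permits null-valued records (they act as deletion tombstones on compacted topics), so a consumer must not assume a payload exists. A minimal, NiFi-independent sketch of the same guard (the helper name is hypothetical):

import java.io.ByteArrayOutputStream;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Copy the record payload only when one exists; a null value is a
// tombstone on compacted topics and must not be written blindly.
static byte[] payloadOrEmpty(ConsumerRecord<byte[], byte[]> record) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] value = record.value();
    if (value != null) {
        out.write(value, 0, value.length);  // this overload avoids the checked IOException of write(byte[])
    }
    return out.toByteArray();
}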

Example 74 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project pancm_project by xuwujing.

the class KafkaConsumerTest3 method run.

@Override
public void run() {
    System.out.println("---------开始消费---------");
    int messageNo = 1;
    List<String> list = new ArrayList<String>();
    List<Long> list2 = new ArrayList<Long>();
    TopicPartition p = new TopicPartition(topic, 0);
    consumer.assign(Arrays.asList(p));
    // 指定分区和offset进行消费
    consumer.seek(p, 0);
    try {
        for (; ; ) {
            msgList = consumer.poll(100);
            if (null != msgList && msgList.count() > 0) {
                for (ConsumerRecord<String, String> record : msgList) {
                    // log every 10th message received
                    if (messageNo % 10 == 0) {
                        System.out.println(messageNo + " ======= receive: partition = " + record.partition() + ", key = " + record.key() + ", value = " + record.value() + ", offset = " + record.offset());
                    }
                    messageNo++;
                }
            // manual commit
            // consumer.commitSync();
            } else {
                Thread.sleep(1000);
                System.out.println("...");
            }
        }
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        consumer.close();
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList)
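
The commented-out commitSync() marks where a manual commit would go once enable.auto.commit is disabled. A minimal sketch of committing an explicit per-partition offset, pointing at the position after the last record processed (the helper name is illustrative):

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

// Commit the offset *after* the last processed record so that a
// restarted consumer resumes with the next unread message.
static void commitProcessed(KafkaConsumer<String, String> consumer,
                            ConsumerRecord<String, String> lastProcessed) {
    TopicPartition tp = new TopicPartition(lastProcessed.topic(), lastProcessed.partition());
    consumer.commitSync(Collections.singletonMap(
            tp, new OffsetAndMetadata(lastProcessed.offset() + 1)));
}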

Example 75 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project streamsx.kafka by IBMStreams.

the class KafkaConsumerClient method refreshFromCluster.

private void refreshFromCluster() {
    logger.debug("Refreshing from cluster..."); //$NON-NLS-1$
    List<String> topics = offsetManager.getTopics();
    Map<TopicPartition, Long> startOffsetMap = new HashMap<TopicPartition, Long>();
    for (String topic : topics) {
        List<PartitionInfo> parts = consumer.partitionsFor(topic);
        parts.forEach(pi -> {
            // if no partitions were specified take them all, otherwise only the user-specified partitions
            if (partitions.isEmpty() || partitions.contains(pi.partition())) {
                TopicPartition tp = new TopicPartition(pi.topic(), pi.partition());
                long startOffset = offsetManager.getOffset(pi.topic(), pi.partition());
                if (startOffset > -1L) {
                    startOffsetMap.put(tp, startOffset);
                }
            }
        });
    }
    logger.debug("startOffsets=" + startOffsetMap); //$NON-NLS-1$
    // assign the consumer to the partitions and seek to the
    // last saved offset
    consumer.assign(startOffsetMap.keySet());
    for (Entry<TopicPartition, Long> entry : startOffsetMap.entrySet()) {
        logger.debug("Consumer seeking: TopicPartition=" + entry.getKey() + ", new_offset=" + entry.getValue()); //$NON-NLS-1$ //$NON-NLS-2$
        consumer.seek(entry.getKey(), entry.getValue());
    }
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo)
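
The assign-then-seek sequence shown here is the standard way to pin a consumer to externally stored offsets without involving Kafka's consumer-group rebalancing. A compact sketch with a plain Map standing in for the offsetManager (method and variable names are illustrative):

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

// Restore a consumer to offsets saved outside Kafka (here: an in-memory map).
static void restoreOffsets(KafkaConsumer<String, String> consumer,
                           Map<TopicPartition, Long> savedOffsets) {
    consumer.assign(savedOffsets.keySet());  // explicit assignment, no group rebalancing
    savedOffsets.forEach(consumer::seek);    // resume exactly where we left off
}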

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition): 1729 usages
HashMap (java.util.HashMap): 744 usages
Test (org.junit.Test): 519 usages
ArrayList (java.util.ArrayList): 416 usages
Map (java.util.Map): 361 usages
Test (org.junit.jupiter.api.Test): 347 usages
HashSet (java.util.HashSet): 281 usages
List (java.util.List): 260 usages
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 246 usages
Set (java.util.Set): 189 usages
LinkedHashMap (java.util.LinkedHashMap): 180 usages
PartitionInfo (org.apache.kafka.common.PartitionInfo): 170 usages
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 155 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 145 usages
Node (org.apache.kafka.common.Node): 140 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 109 usages
KafkaException (org.apache.kafka.common.KafkaException): 105 usages
Errors (org.apache.kafka.common.protocol.Errors): 105 usages
ByteBuffer (java.nio.ByteBuffer): 99 usages
Properties (java.util.Properties): 93 usages