use of org.apache.kafka.common.TopicPartition in project flink by apache.
the class Kafka09FetcherTest method testCancellationWhenEmitBlocks.
@Test
public void testCancellationWhenEmitBlocks() throws Exception {
    // ----- some test data -----
    final String topic = "test-topic";
    final int partition = 3;
    final byte[] payload = new byte[] { 1, 2, 3, 4 };

    final List<ConsumerRecord<byte[], byte[]>> records = Arrays.asList(
            new ConsumerRecord<byte[], byte[]>(topic, partition, 15, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 16, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 17, payload, payload));

    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> data = new HashMap<>();
    data.put(new TopicPartition(topic, partition), records);
    final ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(data);

    // ----- the test consumer -----
    final KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) {
            return consumerRecords;
        }
    });
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);

    // ----- build a fetcher -----
    BlockingSourceContext<String> sourceContext = new BlockingSourceContext<>();
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(
            new KafkaTopicPartition(topic, partition), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema =
            new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());

    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(),
            10, /* watermark interval */
            this.getClass().getClassLoader(),
            "task_name",
            new UnregisteredMetricsGroup(),
            schema,
            new Properties(),
            0L,
            false);

    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();

    // wait until the thread started to emit records to the source context
    sourceContext.waitTillHasBlocker();

    // now we try to cancel the fetcher, including the interruption usually done on the task thread
    // once it has finished, there must be no more thread blocked on the source context
    fetcher.cancel();
    fetcherRunner.interrupt();
    fetcherRunner.join();

    assertFalse("fetcher threads did not properly finish", sourceContext.isStillBlocking());
}
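For reference, ConsumerRecords can be assembled by hand exactly as the mocked poll() result above is: it is just a map keyed by TopicPartition. A minimal standalone sketch (topic, partition, and offset values here are arbitrary assumptions, not taken from the test):

    // Build a canned ConsumerRecords instance for a stubbed poll() result.
    TopicPartition tp = new TopicPartition("test-topic", 3);
    List<ConsumerRecord<byte[], byte[]>> recs = Collections.singletonList(
            new ConsumerRecord<>("test-topic", 3, 42L, new byte[0], new byte[0]));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> byPartition =
            Collections.singletonMap(tp, recs);
    ConsumerRecords<byte[], byte[]> fakePollResult = new ConsumerRecords<>(byPartition);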
use of org.apache.kafka.common.TopicPartition in project open-kilda by telstra.
the class KafkaUtils method getStateDumpsFromBolts.
public DumpStateManager getStateDumpsFromBolts() {
    long timestamp = System.currentTimeMillis();
    String correlationId = String.format("atdd-%d", timestamp);
    CtrlRequest dumpRequest = new CtrlRequest("*", new RequestData("dump"), timestamp, correlationId, WFM_CTRL);
    try {
        RecordMetadata postedMessage = postMessage(settings.getControlTopic(), dumpRequest);
        KafkaConsumer<String, String> consumer = createConsumer();
        try {
            consumer.subscribe(Collections.singletonList(settings.getControlTopic()),
                    new NoOpConsumerRebalanceListener() {

                        @Override
                        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                            // rewind every assigned partition to the offset of the posted request,
                            // so only responses produced after it are read
                            System.out.println("Seek to offset: " + postedMessage.offset());
                            for (TopicPartition topicPartition : partitions) {
                                consumer.seek(topicPartition, postedMessage.offset());
                            }
                        }
                    });
            List<CtrlResponse> buffer = new ArrayList<>();
            final int BOLT_COUNT = 4;
            final int NUMBER_OF_ATTEMPTS = 5;
            int attempt = 0;
            while (buffer.size() < BOLT_COUNT && attempt++ < NUMBER_OF_ATTEMPTS) {
                for (ConsumerRecord<String, String> record : consumer.poll(1000)) {
                    System.out.println("Received message: (" + record.key() + ", " + record.value()
                            + ") at offset " + record.offset());
                    Message message = MAPPER.readValue(record.value(), Message.class);
                    if (message.getDestination() == CTRL_CLIENT
                            && message.getCorrelationId().equals(correlationId)) {
                        buffer.add((CtrlResponse) message);
                    }
                }
            }
            return DumpStateManager.fromResponsesList(buffer);
        } finally {
            consumer.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}
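The rebalance listener above is the standard way to seek on a subscribed (rather than assigned) consumer: with subscribe(), partitions are only known once the group coordinator hands them out, so seek() must wait for the assignment callback. A minimal standalone sketch of the same pattern (topic name and target offset are assumptions):

    final long targetOffset = 100L; // assumed: offset of a previously produced request
    consumer.subscribe(Collections.singletonList("ctrl-topic"), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // nothing to clean up in this sketch
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // seek() is only valid for partitions this consumer currently owns,
            // which is exactly what this callback hands us
            for (TopicPartition tp : partitions) {
                consumer.seek(tp, targetOffset);
            }
        }
    });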
use of org.apache.kafka.common.TopicPartition in project nifi by apache.
the class ConsumerLease method writeData.
private void writeData(final ProcessSession session, ConsumerRecord<byte[], byte[]> record, final TopicPartition topicPartition) {
    FlowFile flowFile = session.create();
    final BundleTracker tracker = new BundleTracker(record, topicPartition, keyEncoding);
    tracker.incrementRecordCount(1);
    final byte[] value = record.value();
    // a null value (e.g. a tombstone on a compacted topic) carries no content to write
    if (value != null) {
        flowFile = session.write(flowFile, out -> {
            out.write(value);
        });
    }
    flowFile = session.putAllAttributes(flowFile, getAttributes(record));
    tracker.updateFlowFile(flowFile);
    populateAttributes(tracker);
    session.transfer(tracker.flowFile, REL_SUCCESS);
}
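The getAttributes(record) helper is not shown in this excerpt. A plausible minimal version (hypothetical, not NiFi's actual implementation; the attribute names are assumptions) would copy the record's coordinates into FlowFile attributes:

    // Hypothetical sketch of an attribute map for a ConsumerRecord; NiFi's real
    // getAttributes(...) is not shown above, so names and keys here are assumptions.
    private Map<String, String> getAttributes(final ConsumerRecord<byte[], byte[]> record) {
        final Map<String, String> attributes = new HashMap<>();
        attributes.put("kafka.topic", record.topic());
        attributes.put("kafka.partition", String.valueOf(record.partition()));
        attributes.put("kafka.offset", String.valueOf(record.offset()));
        if (record.key() != null) {
            attributes.put("kafka.key", new String(record.key(), StandardCharsets.UTF_8));
        }
        return attributes;
    }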
use of org.apache.kafka.common.TopicPartition in project pancm_project by xuwujing.
the class KafkaConsumerTest3 method run.
@Override
public void run() {
    System.out.println("--------- start consuming ---------");
    int messageNo = 1;
    List<String> list = new ArrayList<String>();
    List<Long> list2 = new ArrayList<Long>();
    TopicPartition p = new TopicPartition(topic, 0);
    consumer.assign(Arrays.asList(p));
    // consume from an explicit partition and offset
    consumer.seek(p, 0);
    try {
        for (;;) {
            msgList = consumer.poll(100);
            if (null != msgList && msgList.count() > 0) {
                for (ConsumerRecord<String, String> record : msgList) {
                    // print every 10th record
                    if (messageNo % 10 == 0) {
                        System.out.println(messageNo + "=======receive: partId = " + record.partition()
                                + ", key = " + record.key() + ", value = " + record.value()
                                + " offset===" + record.offset());
                    }
                    messageNo++;
                }
                // manual commit
                // consumer.commitSync();
            } else {
                Thread.sleep(1000);
                System.out.println("...");
            }
        }
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        consumer.close();
    }
}
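The assign-and-seek pattern above bypasses group management entirely, which is useful for replaying a partition from a known position. A minimal self-contained sketch of the same idea (bootstrap server and topic name are assumptions; adapt to your cluster):

    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
        TopicPartition tp = new TopicPartition("test-topic", 0);
        consumer.assign(Collections.singletonList(tp)); // no group rebalancing with assign()
        consumer.seekToBeginning(Collections.singletonList(tp)); // replay from the earliest offset
        ConsumerRecords<String, String> records = consumer.poll(1000);
        records.forEach(r -> System.out.println(r.offset() + ": " + r.value()));
    }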
use of org.apache.kafka.common.TopicPartition in project streamsx.kafka by IBMStreams.
the class KafkaConsumerClient method refreshFromCluster.
private void refreshFromCluster() {
    logger.debug("Refreshing from cluster..."); //$NON-NLS-1$
    List<String> topics = offsetManager.getTopics();
    Map<TopicPartition, Long> startOffsetMap = new HashMap<TopicPartition, Long>();
    for (String topic : topics) {
        List<PartitionInfo> parts = consumer.partitionsFor(topic);
        parts.forEach(pi -> {
            // if no partitions were specified, take every partition;
            // otherwise only retrieve offsets for the user-specified partitions
            if (partitions.isEmpty() || partitions.contains(pi.partition())) {
                TopicPartition tp = new TopicPartition(pi.topic(), pi.partition());
                long startOffset = offsetManager.getOffset(pi.topic(), pi.partition());
                if (startOffset > -1L) {
                    startOffsetMap.put(tp, startOffset);
                }
            }
        });
    }
    logger.debug("startOffsets=" + startOffsetMap); //$NON-NLS-1$

    // assign the consumer to the partitions and seek to the last saved offset
    consumer.assign(startOffsetMap.keySet());
    for (Entry<TopicPartition, Long> entry : startOffsetMap.entrySet()) {
        logger.debug("Consumer seeking: TopicPartition=" + entry.getKey() + ", new_offset=" + entry.getValue()); //$NON-NLS-1$ //$NON-NLS-2$
        consumer.seek(entry.getKey(), entry.getValue());
    }
}
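Note that the startOffset > -1L guard silently drops partitions with no saved position, so they are never assigned. A common alternative is to fall back to the log's own bounds for such partitions. A hypothetical sketch of that variant (not the streamsx.kafka code above; the topic name is an assumption), using the stock beginningOffsets() API available since Kafka 0.10.1:

    // Assign every discovered partition; where no offset was saved,
    // start from the earliest record still available in the log.
    Set<TopicPartition> assignment = new HashSet<>();
    for (PartitionInfo pi : consumer.partitionsFor("my-topic")) { // assumed topic name
        assignment.add(new TopicPartition(pi.topic(), pi.partition()));
    }
    Map<TopicPartition, Long> earliest = consumer.beginningOffsets(assignment);
    consumer.assign(assignment);
    for (TopicPartition tp : assignment) {
        Long saved = startOffsetMap.get(tp); // startOffsetMap as built above
        consumer.seek(tp, saved != null ? saved : earliest.get(tp));
    }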