Use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
The class ProcessorStateManager, method updateStandbyStates:
List<ConsumerRecord<byte[], byte[]>> updateStandbyStates(final TopicPartition storePartition,
                                                         final List<ConsumerRecord<byte[], byte[]>> records) {
    final long limit = offsetLimit(storePartition);
    List<ConsumerRecord<byte[], byte[]>> remainingRecords = null;
    final List<KeyValue<byte[], byte[]>> restoreRecords = new ArrayList<>();

    // restore states from changelog records
    final BatchingStateRestoreCallback restoreCallback =
        getBatchingRestoreCallback(restoreCallbacks.get(storePartition.topic()));
    long lastOffset = -1L;
    int count = 0;
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        if (record.offset() < limit) {
            restoreRecords.add(KeyValue.pair(record.key(), record.value()));
            lastOffset = record.offset();
        } else {
            // at or beyond the offset limit; keep the record for a later restore pass
            if (remainingRecords == null) {
                remainingRecords = new ArrayList<>(records.size() - count);
            }
            remainingRecords.add(record);
        }
        count++;
    }

    if (!restoreRecords.isEmpty()) {
        try {
            restoreCallback.restoreAll(restoreRecords);
        } catch (final Exception e) {
            throw new ProcessorStateException(
                String.format("%sException caught while trying to restore state from %s", logPrefix, storePartition), e);
        }
    }

    // record the restored offset for its changelog partition
    restoredOffsets.put(storePartition, lastOffset + 1);
    return remainingRecords;
}
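A minimal sketch of how a restore loop might feed this method (the restoreConsumer and buffered names are illustrative, not from the project): poll the changelog, hand each partition's batch to updateStandbyStates, and hold on to whatever comes back as beyond the offset limit for a later pass.

    void restoreOnce(final Consumer<byte[], byte[]> restoreConsumer,
                     final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> buffered) {
        final ConsumerRecords<byte[], byte[]> polled = restoreConsumer.poll(100);
        for (final TopicPartition partition : polled.partitions()) {
            final List<ConsumerRecord<byte[], byte[]>> remaining =
                updateStandbyStates(partition, polled.records(partition));
            if (remaining != null) {
                // records at or beyond the offset limit; retry them on a later pass
                buffered.put(partition, remaining);
            }
        }
    }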
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project incubator-rya by apache.
The class LoadStatementsCommandIT, method shortParams:
@Test
public void shortParams() throws Exception {
    // Load a file of statements into Kafka.
    final String visibilities = "a|b|c";
    final String[] args = new String[] {
        "-r", "" + ryaInstance,
        "-i", kafka.getKafkaHostname(),
        "-p", kafka.getKafkaPort(),
        "-f", TURTLE_FILE.toString(),
        "-v", visibilities };

    // Load the file of statements into the Statements topic.
    new LoadStatementsCommand().execute(args);

    // Show that the statements were loaded into the topic.
    final List<VisibilityStatement> read = new ArrayList<>();
    try (final Consumer<String, VisibilityStatement> consumer =
            KafkaTestUtil.fromStartConsumer(kafka, StringDeserializer.class, VisibilityStatementDeserializer.class)) {
        // Subscribe for messages.
        consumer.subscribe(Arrays.asList(KafkaTopics.statementsTopic(ryaInstance)));

        // Read the messages and extract their values.
        final Iterator<ConsumerRecord<String, VisibilityStatement>> iter = consumer.poll(3000).iterator();
        while (iter.hasNext()) {
            read.add(iter.next().value());
        }
    }

    final ValueFactory VF = ValueFactoryImpl.getInstance();
    final List<VisibilityStatement> expected = new ArrayList<>();
    expected.add(new VisibilityStatement(
        VF.createStatement(VF.createURI("http://example#alice"), VF.createURI("http://example#talksTo"), VF.createURI("http://example#bob")),
        visibilities));
    expected.add(new VisibilityStatement(
        VF.createStatement(VF.createURI("http://example#bob"), VF.createURI("http://example#talksTo"), VF.createURI("http://example#charlie")),
        visibilities));
    expected.add(new VisibilityStatement(
        VF.createStatement(VF.createURI("http://example#charlie"), VF.createURI("http://example#likes"), VF.createURI("http://example#icecream")),
        visibilities));

    // Show that the written statements match the read ones.
    assertEquals(expected, read);
}
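The single poll(3000) above is enough for a small test fixture, since all three statements land well within the timeout. A slightly more robust variant (a sketch, not Rya code) keeps polling until a poll returns nothing, so late-arriving records are not missed:

    private static <V> List<V> readAllValues(final Consumer<String, V> consumer) {
        final List<V> values = new ArrayList<>();
        ConsumerRecords<String, V> batch = consumer.poll(3000);
        while (!batch.isEmpty()) {
            for (final ConsumerRecord<String, V> record : batch) {
                values.add(record.value());
            }
            // keep draining until a poll comes back empty
            batch = consumer.poll(3000);
        }
        return values;
    }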
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project incubator-rya by apache.
The class KafkaExportIT, method readGroupedResults:
private Set<VisibilityBindingSet> readGroupedResults(final String pcjId, final VariableOrder groupByVars) {
    requireNonNull(pcjId);

    // Read the results from the Kafka topic. The last record for each set of Group By values is the
    // final aggregation result. The key in this map is a Binding Set containing only the group by variables.
    final Map<BindingSet, VisibilityBindingSet> results = new HashMap<>();
    try (final KafkaConsumer<String, VisibilityBindingSet> consumer = makeConsumer(pcjId)) {
        final ConsumerRecords<String, VisibilityBindingSet> records = consumer.poll(5000);
        final Iterator<ConsumerRecord<String, VisibilityBindingSet>> recordIterator = records.iterator();
        while (recordIterator.hasNext()) {
            final VisibilityBindingSet visBindingSet = recordIterator.next().value();

            final MapBindingSet key = new MapBindingSet();
            for (final String groupByVar : groupByVars) {
                key.addBinding(visBindingSet.getBinding(groupByVar));
            }
            results.put(key, visBindingSet);
        }
    }
    return Sets.newHashSet(results.values());
}
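Keying the map by the group-by bindings is what makes "take the last result per group" work: each later aggregation update for the same group overwrites the earlier entry, so only the final value per group survives. A contrived sketch of the same idea with plain strings:

    final Map<String, Integer> latestPerGroup = new HashMap<>();
    latestPerGroup.put("alice", 1); // partial count for group "alice"
    latestPerGroup.put("alice", 2); // later update for the same group replaces it
    latestPerGroup.put("bob", 5);
    // latestPerGroup now holds only the final value per group: {alice=2, bob=5}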
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project incubator-skywalking by apache.
The class KafkaConsumerInterceptor, method afterMethod:
@Override
public Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
                          Class<?>[] argumentsTypes, Object ret) throws Throwable {
    Map<TopicPartition, List<ConsumerRecord<?, ?>>> records = (Map<TopicPartition, List<ConsumerRecord<?, ?>>>) ret;
    // Only create the entry span when the poll actually returned records.
    if (records.size() > 0) {
        ConsumerEnhanceRequiredInfo requiredInfo = (ConsumerEnhanceRequiredInfo) objInst.getSkyWalkingDynamicField();
        AbstractSpan activeSpan = ContextManager.createEntrySpan(
            OPERATE_NAME_PREFIX + requiredInfo.getTopics() + CONSUMER_OPERATE_NAME_SUFFIX, null)
            .start(requiredInfo.getStartTime());
        activeSpan.setComponent(ComponentsDefine.KAFKA);
        SpanLayer.asMQ(activeSpan);
        Tags.MQ_BROKER.set(activeSpan, requiredInfo.getBrokerServers());
        Tags.MQ_TOPIC.set(activeSpan, requiredInfo.getTopics());

        // Extract the trace context that the producer propagated through each record's headers.
        for (List<ConsumerRecord<?, ?>> consumerRecords : records.values()) {
            for (ConsumerRecord<?, ?> record : consumerRecords) {
                ContextCarrier contextCarrier = new ContextCarrier();
                CarrierItem next = contextCarrier.items();
                while (next.hasNext()) {
                    next = next.next();
                    Iterator<Header> iterator = record.headers().headers(next.getHeadKey()).iterator();
                    if (iterator.hasNext()) {
                        next.setHeadValue(new String(iterator.next().value()));
                    }
                }
                ContextManager.extract(contextCarrier);
            }
        }
        ContextManager.stopSpan();
    }
    return ret;
}
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project eventuate-tram-core by eventuate-tram.
The class MessageConsumerKafkaImpl, method subscribe:
@Override
public void subscribe(String subscriberId, Set<String> channels, MessageHandler handler) {
    BiConsumer<ConsumerRecord<String, String>, BiConsumer<Void, Throwable>> kcHandler = (record, callback) -> {
        Message m = toMessage(record);
        // TODO If we do that here then remove TT from higher-levels
        transactionTemplate.execute(ts -> {
            if (duplicateMessageDetector.isDuplicate(subscriberId, m.getId())) {
                logger.trace("Duplicate message {} {}", subscriberId, m.getId());
                callback.accept(null, null);
                return null;
            }
            try {
                logger.trace("Invoking handler {} {}", subscriberId, m.getId());
                handler.accept(m);
            } catch (Throwable t) {
                logger.trace("Got exception {} {}", subscriberId, m.getId());
                logger.trace("Got exception ", t);
                callback.accept(null, t);
                return null;
            }
            logger.trace("handled message {} {}", subscriberId, m.getId());
            callback.accept(null, null);
            return null;
        });
    };

    EventuateKafkaConsumer kc = new EventuateKafkaConsumer(subscriberId, kcHandler, new ArrayList<>(channels), bootstrapServers);
    consumers.add(kc);
    kc.start();
}
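A hypothetical call site for the method above (the subscriber id and channel name are made up): the handler only ever sees messages that pass the duplicate check, and the commit callback is invoked on its behalf.

    messageConsumer.subscribe(
        "customerService",                        // subscriberId
        Collections.singleton("CustomerAccount"), // channels
        message -> logger.info("received message {}", message.getId()));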