Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
The class InMemorySessionStore, method init.
@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
    final String threadId = Thread.currentThread().getName();
    final String taskName = context.taskId().toString();
    // The provided context is not required to implement InternalProcessorContext;
    // if it doesn't, we can't record the dropped-records metric.
    if (context instanceof InternalProcessorContext) {
        this.context = (InternalProcessorContext) context;
        final StreamsMetricsImpl metrics = this.context.metrics();
        expiredRecordSensor = TaskMetrics.droppedRecordsSensor(threadId, taskName, metrics);
    } else {
        this.context = null;
        expiredRecordSensor = null;
    }

    if (root != null) {
        final boolean consistencyEnabled = StreamsConfig.InternalConfig.getBoolean(
            context.appConfigs(), IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, false);
        context.register(root, (RecordBatchingStateRestoreCallback) records -> {
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                put(SessionKeySchema.from(Bytes.wrap(record.key())), record.value());
                ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(
                    record, consistencyEnabled, position);
            }
        });
    }
    open = true;
}
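For comparison, here is a minimal, hypothetical sketch (not from the Kafka codebase) of how a custom in-memory StateStore can register a restore callback so that changelog records are replayed into it during init. The class name, store name, and the use of the simple byte[]-based StateRestoreCallback lambda are illustrative assumptions; real stores such as InMemorySessionStore above also track metrics, expiration, and consistency positions.

import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.StateStore;
import org.apache.kafka.streams.processor.StateStoreContext;

public class SketchInMemoryStore implements StateStore {

    // Illustrative in-memory storage only.
    private final NavigableMap<Bytes, byte[]> data = new TreeMap<>();
    private volatile boolean open = false;

    @Deprecated
    @Override
    public void init(final ProcessorContext context, final StateStore root) {
        // Replay each raw changelog key/value pair straight into the map.
        context.register(root, (key, value) -> data.put(Bytes.wrap(key), value));
        open = true;
    }

    @Override
    public void init(final StateStoreContext context, final StateStore root) {
        // Same registration through the non-deprecated StateStoreContext entry point.
        context.register(root, (key, value) -> data.put(Bytes.wrap(key), value));
        open = true;
    }

    @Override
    public String name() {
        return "sketch-store"; // assumed store name
    }

    @Override
    public void flush() {
        // Nothing to flush for a purely in-memory map.
    }

    @Override
    public void close() {
        data.clear();
        open = false;
    }

    @Override
    public boolean persistent() {
        return false;
    }

    @Override
    public boolean isOpen() {
        return open;
    }
}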
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
The class RocksDBStoreTest, method shouldRestoreRecordsAndConsistencyVectorMultipleTopics.
@Test
public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics() {
    final List<ConsumerRecord<byte[], byte[]>> entries = getChangelogRecordsMultipleTopics();
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, MockRocksDbConfigSetter.class);
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(dir, Serdes.String(), Serdes.String(), new StreamsConfig(props));
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);

    context.restoreWithHeaders(rocksDBStore.name(), entries);

    assertEquals("a", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "1")))));
    assertEquals("b", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "2")))));
    assertEquals("c", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "3")))));
    assertThat(rocksDBStore.getPosition(), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("A"), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("A"), hasEntry(0, 3L));
    assertThat(rocksDBStore.getPosition().getPartitionPositions("B"), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("B"), hasEntry(0, 2L));
}
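The Position the test inspects is the public org.apache.kafka.streams.query.Position API: a vector that maps changelog topic to partition to the last applied offset. A standalone sketch mirroring the offsets the assertions above expect (topic names and offsets taken from the test, everything else made up):

import org.apache.kafka.streams.query.Position;

public class PositionSketch {
    public static void main(String[] args) {
        // Build a position vector equivalent to what the restored store should report:
        // topic "A" partition 0 at offset 3, topic "B" partition 0 at offset 2.
        final Position position = Position.emptyPosition()
            .withComponent("A", 0, 3L)
            .withComponent("B", 0, 2L);

        System.out.println(position.getPartitionPositions("A")); // {0=3}
        System.out.println(position.getPartitionPositions("B")); // {0=2}
    }
}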
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
The class TransactionalMessageCopier, method runEventLoop.
public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(
        parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream()
                    .mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // We use message cap for remaining here as the remainingMessages are not set yet.
                System.out.println(statusAsJson(totalMessageProcessed.get(),
                    numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                    transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(),
            numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(),
                numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata
                        ? consumer.groupMetadata()
                        : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format(
                        "The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Re-throw the exception only if the wakeup was not triggered
            // as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
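The createProducer and createConsumer helpers are not shown above. As a hedged sketch of what a consume-transform-produce copier like this relies on, these are standard Kafka client configs; the broker address, class and method names, and exact values are illustrative assumptions.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

public class TransactionalClientConfigs {
    public static Properties producerProps(String transactionalId) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId);   // required before initTransactions()
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");          // implied by transactions
        return props;
    }

    public static Properties consumerProps(String groupId) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");         // offsets go through sendOffsetsToTransaction
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");   // only read committed transactional data
        return props;
    }
}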
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
The class SmokeTestDriver, method verifyAll.
private static VerificationResult verifyAll(final Map<String, Set<Integer>> inputs,
                                            final Map<String, Map<String, LinkedList<ConsumerRecord<String, Number>>>> events,
                                            final boolean printResults) {
    final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    boolean pass;
    try (final PrintStream resultStream = new PrintStream(byteArrayOutputStream)) {
        pass = verifyTAgg(resultStream, inputs, events.get("tagg"), printResults);
        pass &= verifySuppressed(resultStream, "min-suppressed", events, printResults);
        pass &= verify(resultStream, "min-suppressed", inputs, events, windowedKey -> {
            final String unwindowedKey = windowedKey.substring(1, windowedKey.length() - 1).replaceAll("@.*", "");
            return getMin(unwindowedKey);
        }, printResults);
        pass &= verifySuppressed(resultStream, "sws-suppressed", events, printResults);
        pass &= verify(resultStream, "min", inputs, events, SmokeTestDriver::getMin, printResults);
        pass &= verify(resultStream, "max", inputs, events, SmokeTestDriver::getMax, printResults);
        pass &= verify(resultStream, "dif", inputs, events, key -> getMax(key).intValue() - getMin(key).intValue(), printResults);
        pass &= verify(resultStream, "sum", inputs, events, SmokeTestDriver::getSum, printResults);
        pass &= verify(resultStream, "cnt", inputs, events, key1 -> getMax(key1).intValue() - getMin(key1).intValue() + 1L, printResults);
        pass &= verify(resultStream, "avg", inputs, events, SmokeTestDriver::getAvg, printResults);
    }
    return new VerificationResult(pass, new String(byteArrayOutputStream.toByteArray(), StandardCharsets.UTF_8));
}
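The "dif" and "cnt" expectations above are derived purely from a key's min and max, which works because the smoke test feeds each key the consecutive integers min..max. A tiny standalone illustration with made-up numbers:

public class ExpectedAggregates {
    public static void main(String[] args) {
        final int min = 3;   // made-up smallest value sent for a key
        final int max = 10;  // made-up largest value sent for a key
        System.out.println("dif = " + (max - min));       // 7, what the "dif" topic should hold
        System.out.println("cnt = " + (max - min + 1L));  // 8, what the "cnt" topic should hold
    }
}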
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
The class EmbeddedKafkaCluster, method consume.
/**
 * Consume at least n records in a given duration or throw an exception.
 *
 * @param n the number of expected records in this topic.
 * @param maxDuration the max duration to wait for these records (in milliseconds).
 * @param topics the topics to subscribe and consume records from.
 * @return a {@link ConsumerRecords} collection containing at least n records.
 */
public ConsumerRecords<byte[], byte[]> consume(int n, long maxDuration, String... topics) {
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
    int consumedRecords = 0;
    try (KafkaConsumer<byte[], byte[]> consumer = createConsumerAndSubscribeTo(Collections.emptyMap(), topics)) {
        final long startMillis = System.currentTimeMillis();
        long allowedDuration = maxDuration;
        while (allowedDuration > 0) {
            log.debug("Consuming from {} for {} millis.", Arrays.toString(topics), allowedDuration);
            ConsumerRecords<byte[], byte[]> rec = consumer.poll(Duration.ofMillis(allowedDuration));
            if (rec.isEmpty()) {
                allowedDuration = maxDuration - (System.currentTimeMillis() - startMillis);
                continue;
            }
            for (TopicPartition partition : rec.partitions()) {
                final List<ConsumerRecord<byte[], byte[]>> r = rec.records(partition);
                records.computeIfAbsent(partition, t -> new ArrayList<>()).addAll(r);
                consumedRecords += r.size();
            }
            if (consumedRecords >= n) {
                return new ConsumerRecords<>(records);
            }
            allowedDuration = maxDuration - (System.currentTimeMillis() - startMillis);
        }
    }
    throw new RuntimeException("Could not find enough records. found " + consumedRecords + ", expected " + n);
}
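A hypothetical caller of consume(): wait up to 30 seconds for at least 5 records on "test-topic" and print each record's coordinates. The cluster parameter, topic name, and counts are assumptions, as is the package of the Connect test utility EmbeddedKafkaCluster shown above.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.connect.util.clusters.EmbeddedKafkaCluster;

public class ConsumeExample {
    static void printRecords(EmbeddedKafkaCluster cluster) {
        final ConsumerRecords<byte[], byte[]> records = cluster.consume(5, 30_000L, "test-topic");
        for (ConsumerRecord<byte[], byte[]> record : records) {
            // Print topic-partition@offset plus the value size for each consumed record.
            System.out.printf("%s-%d@%d (%d value bytes)%n",
                record.topic(), record.partition(), record.offset(),
                record.value() == null ? 0 : record.value().length);
        }
    }
}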