Example 11 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project streamsx.kafka by IBMStreams.

the class TransactionalKafkaProducerClient method checkpoint.

@Override
public void checkpoint(Checkpoint checkpoint) throws Exception {
    final long currentSequenceId = checkpoint.getSequenceId();
    if (logger.isDebugEnabled())
        logger.debug("TransactionalKafkaProducerClient -- CHECKPOINT id=" + currentSequenceId);
    // when we checkpoint, we must have a transaction. open a transaction if not yet done ...
    checkAndBeginTransaction();
    if (logger.isDebugEnabled())
        logger.debug("currentSequenceId=" + currentSequenceId + ", lastSuccessSequenceId=" + lastSuccessfulSequenceId);
    boolean doCommit = true;
    // A gap between the current and the last successful sequence ID (current > last + 1)
    // indicates a restart or reset in between; consult the control topic to find out
    // whether this transaction's data was already committed (i.e. is a duplicate).
    if (currentSequenceId > lastSuccessfulSequenceId + 1) {
        // must be read with 'isolation.level=read_committed'
        long committedSequenceId = getCommittedSequenceIdFromCtrlTopic();
        if (logger.isDebugEnabled())
            logger.debug("committedSequenceId=" + committedSequenceId);
        if (lastSuccessfulSequenceId < committedSequenceId) {
            if (logger.isDebugEnabled())
                logger.debug("Aborting transaction due to lastSuccessfulSequenceId < committedSequenceId");
            // If the last successful sequence ID is less than
            // the committed sequence ID, this transaction has
            // been processed before and is a duplicate.
            // Discard this transaction.
            abortTransaction();
            doCommit = false;
            lastSuccessfulSequenceId = committedSequenceId;
        }
    }
    if (logger.isDebugEnabled())
        logger.debug("doCommit = " + doCommit);
    if (doCommit) {
        RecordMetadata lastCommittedControlRecordMetadata = commitTransaction(currentSequenceId);
        lastSuccessfulSequenceId = currentSequenceId;
        TopicPartition tp = new TopicPartition(lastCommittedControlRecordMetadata.topic(), lastCommittedControlRecordMetadata.partition());
        controlTopicInitialOffsets.put(tp, lastCommittedControlRecordMetadata.offset());
    // The 'controlTopicInitialOffsets' need not be synced back to the JCP. The CV is for reset to initial state.
    // this.startOffsetsCV.setValue (serializeObject (controlTopicInitialOffsets));
    }
    transactionInProgress.set(false);
    // save the last successful seq ID
    if (logger.isDebugEnabled())
        logger.debug("Checkpointing lastSuccessfulSequenceId: " + lastSuccessfulSequenceId);
    checkpoint.getOutputStream().writeLong(lastSuccessfulSequenceId);
    // save the control topic offsets
    if (logger.isDebugEnabled())
        logger.debug("Checkpointing control topic offsets: " + controlTopicInitialOffsets);
    checkpoint.getOutputStream().writeObject(controlTopicInitialOffsets);
    if (!lazyTransactionBegin) {
        // start a new transaction
        checkAndBeginTransaction();
    }
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) TopicPartition(org.apache.kafka.common.TopicPartition)
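
The commit-or-abort logic above rides on the Kafka transactions API: records sent inside a transaction become visible to read_committed consumers only after commitTransaction(), and abortTransaction() discards them, which is what makes dropping a duplicate checkpoint safe. A minimal, self-contained sketch of that API (broker address, topic, and transactional.id are illustrative assumptions, not taken from the project):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalProduceSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        props.put("transactional.id", "sketch-tx-1");     // enables transactions (and idempotence)
        KafkaProducer<String, String> producer =
                new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
        producer.initTransactions(); // fences off older producers with the same transactional.id
        try {
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
            producer.commitTransaction(); // visible to read_committed consumers only after this
        } catch (Exception e) {
            // Sketch-level handling; real code must not abort after a ProducerFencedException.
            producer.abortTransaction();  // uncommitted records are discarded
        } finally {
            producer.close();
        }
    }
}

The sketch glosses over fencing: on ProducerFencedException the producer must be closed rather than aborted, which production code such as the client above has to handle explicitly.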

Example 12 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project brave by openzipkin.

the class TracingCallbackTest method createRecordMetadata.

RecordMetadata createRecordMetadata() {
    TopicPartition tp = new TopicPartition("foo", 0);
    long timestamp = 2340234L;
    int keySize = 3;
    int valueSize = 5;
    Long checksum = 908923L;
    return new RecordMetadata(tp, -1L, -1L, timestamp, checksum, keySize, valueSize);
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) TopicPartition(org.apache.kafka.common.TopicPartition)
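
Because the helper fabricates a RecordMetadata directly (offsets of -1 mark them as unknown), a producer Callback can be exercised without any broker. A sketch of that, reusing the same pre-3.0 kafka-clients constructor as the test above (newer client versions dropped the checksum parameter, so this exact shape only compiles against older clients; the printout is an illustrative stand-in for brave's real tracing assertions):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;

public class CallbackSketch {
    public static void main(String[] args) {
        // Fabricate metadata exactly as the helper above does; -1 offsets mean "unknown".
        RecordMetadata fake = new RecordMetadata(
                new TopicPartition("foo", 0), -1L, -1L, 2340234L, 908923L, 3, 5);
        // Any producer Callback can then be driven directly, no broker required.
        Callback underTest = (metadata, exception) -> {
            if (exception == null) {
                System.out.println("completed: " + metadata.topic() + "-" + metadata.partition());
            }
        };
        underTest.onCompletion(fake, null); // simulate a successful send
    }
}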

Example 13 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project cruise-control by linkedin.

the class CruiseControlMetricsReporter method sendCruiseControlMetric.

/**
 * Send a CruiseControlMetric to the Kafka topic.
 * @param ccm the Cruise Control metric to send.
 */
public void sendCruiseControlMetric(CruiseControlMetric ccm) {
    // Use topic name as key if existing so that the same sampler will be able to collect all the information
    // of a topic.
    String key = ccm.metricClassId() == CruiseControlMetric.MetricClassId.TOPIC_METRIC ? ((TopicMetric) ccm).topic() : Integer.toString(ccm.brokerId());
    ProducerRecord<String, CruiseControlMetric> producerRecord = new ProducerRecord<>(_cruiseControlMetricsTopic, null, ccm.time(), key, ccm);
    LOG.debug("Sending Cruise Control metric {}.", ccm);
    _producer.send(producerRecord, new Callback() {

        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
            if (e != null) {
                LOG.warn("Failed to send Cruise Control metric {}", ccm);
                _numMetricSendFailure++;
            }
        }
    });
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) CruiseControlMetric(com.linkedin.kafka.cruisecontrol.metricsreporter.metric.CruiseControlMetric) Callback(org.apache.kafka.clients.producer.Callback) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) InterruptException(org.apache.kafka.common.errors.InterruptException)
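
The reporter sends fire-and-forget and only counts failures in the callback. Producer callbacks run on the producer's I/O thread, so they must stay cheap, which is why the callback does nothing beyond logging and incrementing a counter. A hedged sketch of the same pattern as a lambda (broker address, topic, key, and class name are assumptions):

import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class FireAndForgetSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        AtomicLong sendFailures = new AtomicLong();       // mirrors _numMetricSendFailure above
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            // The callback runs on the producer's I/O thread, so keep it cheap:
            // record the failure and return, exactly as the reporter does.
            producer.send(new ProducerRecord<>("metrics-topic", "broker-1", "payload"),
                    (metadata, e) -> {
                        if (e != null) {
                            sendFailures.incrementAndGet();
                        }
                    });
        } // close() waits for in-flight sends, so the counter is settled here
        System.out.println("send failures: " + sendFailures.get());
    }
}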

Example 14 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project cruise-control by linkedin.

the class KafkaSampleStore method storeSamples.

@Override
public void storeSamples(MetricSampler.Samples samples) {
    final AtomicInteger metricSampleCount = new AtomicInteger(0);
    for (PartitionMetricSample sample : samples.partitionMetricSamples()) {
        _producer.send(new ProducerRecord<>(_partitionMetricSampleStoreTopic, null, sample.sampleTime(), null, sample.toBytes()), new Callback() {

            @Override
            public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                if (e == null) {
                    metricSampleCount.incrementAndGet();
                } else {
                    LOG.error("Failed to produce partition metric sample for {} of timestamp {} due to exception", sample.entity().tp(), sample.sampleTime(), e);
                }
            }
        });
    }
    final AtomicInteger brokerMetricSampleCount = new AtomicInteger(0);
    for (BrokerMetricSample sample : samples.brokerMetricSamples()) {
        _producer.send(new ProducerRecord<>(_brokerMetricSampleStoreTopic, sample.toBytes()), new Callback() {

            @Override
            public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                if (e == null) {
                    brokerMetricSampleCount.incrementAndGet();
                } else {
                    LOG.error("Failed to produce model training sample due to exception", e);
                }
            }
        });
    }
    _producer.flush();
    LOG.debug("Stored {} partition metric samples and {} broker metric samples to Kafka", metricSampleCount.get(), brokerMetricSampleCount.get());
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) UnknownVersionException(com.linkedin.kafka.cruisecontrol.metricsreporter.exception.UnknownVersionException) KafkaException(org.apache.kafka.common.KafkaException)
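
storeSamples can safely read the two counters right after producer.flush() because flush() blocks until every previously sent record has completed and its callback has run. A compact sketch of that rendezvous (broker address, topic, and class name are placeholders):

import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class FlushCounterSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        AtomicInteger acked = new AtomicInteger();
        try (KafkaProducer<byte[], byte[]> producer =
                     new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
            for (int i = 0; i < 100; i++) {
                producer.send(new ProducerRecord<>("samples-topic", new byte[] { (byte) i }),
                        (metadata, e) -> {
                            if (e == null) {
                                acked.incrementAndGet();
                            }
                        });
            }
            producer.flush(); // blocks until every send above has completed and its callback has run
            System.out.println("acked " + acked.get() + " of 100"); // safe to read: nothing in flight
        }
    }
}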

Example 15 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project ksql by confluentinc.

the class IntegrationTestHarness method produceData.

/**
 * Produces the given records to the topic. The topic topicName will be
 * automatically created if it doesn't exist.
 * @param topicName the name of the topic to produce to
 * @param recordsToPublish the records to publish, keyed by record key
 * @param serializer the serializer for the GenericRow values
 * @param timestamp the timestamp to set on each produced record
 * @return a map from each record key to the RecordMetadata returned for that record
 * @throws InterruptedException if interrupted while waiting for a send to complete
 * @throws TimeoutException if a send does not complete within the record future timeout
 * @throws ExecutionException if producing a record fails
 */
public Map<String, RecordMetadata> produceData(String topicName, Map<String, GenericRow> recordsToPublish, Serializer<GenericRow> serializer, Long timestamp) throws InterruptedException, TimeoutException, ExecutionException {
    createTopic(topicName);
    Properties producerConfig = properties();
    KafkaProducer<String, GenericRow> producer = new KafkaProducer<>(producerConfig, new StringSerializer(), serializer);
    Map<String, RecordMetadata> result = new HashMap<>();
    for (Map.Entry<String, GenericRow> recordEntry : recordsToPublish.entrySet()) {
        String key = recordEntry.getKey();
        Future<RecordMetadata> recordMetadataFuture = producer.send(buildRecord(topicName, timestamp, recordEntry, key));
        result.put(key, recordMetadataFuture.get(TEST_RECORD_FUTURE_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    }
    producer.close();
    return result;
}
Also used : GenericRow(io.confluent.ksql.GenericRow) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) HashMap(java.util.HashMap) Properties(java.util.Properties) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) HashMap(java.util.HashMap) Map(java.util.Map)
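
Blocking on each recordMetadataFuture.get(timeout, unit) makes the publish effectively synchronous, which suits a test harness: by the time produceData returns, every record is acknowledged and its RecordMetadata recorded. A stripped-down sketch of the same idea using try-with-resources so the producer is closed even when a send times out (topic, timeout constant, and broker address are assumptions):

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SyncProduceSketch {
    // Assumption: stands in for TEST_RECORD_FUTURE_TIMEOUT_MS, whose value isn't shown above.
    private static final long FUTURE_TIMEOUT_MS = 10_000L;

    public static Map<String, RecordMetadata> produce(Map<String, String> records) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        Map<String, RecordMetadata> result = new HashMap<>();
        // try-with-resources closes the producer even if a send times out
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            for (Map.Entry<String, String> entry : records.entrySet()) {
                result.put(entry.getKey(),
                        producer.send(new ProducerRecord<>("test-topic", entry.getKey(), entry.getValue()))
                                .get(FUTURE_TIMEOUT_MS, TimeUnit.MILLISECONDS)); // block per record
            }
        }
        return result;
    }
}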

Aggregations

RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 189
Test (org.junit.Test): 64
Node (org.apache.kafka.common.Node): 50
Test (org.junit.jupiter.api.Test): 50
TopicPartition (org.apache.kafka.common.TopicPartition): 48
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 45
ExecutionException (java.util.concurrent.ExecutionException): 33
Callback (org.apache.kafka.clients.producer.Callback): 32
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 31
Properties (java.util.Properties): 30
HashMap (java.util.HashMap): 24
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 23
ArrayList (java.util.ArrayList): 21
KafkaException (org.apache.kafka.common.KafkaException): 19
List (java.util.List): 15
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 15
Metrics (org.apache.kafka.common.metrics.Metrics): 15
LinkedHashMap (java.util.LinkedHashMap): 13
Future (java.util.concurrent.Future): 13
Map (java.util.Map): 12