use of org.apache.kafka.clients.producer.RecordMetadata in project streamsx.kafka by IBMStreams.
the class TransactionalKafkaProducerClient method checkpoint.
@Override
public void checkpoint(Checkpoint checkpoint) throws Exception {
    final long currentSequenceId = checkpoint.getSequenceId();
    if (logger.isDebugEnabled())
        logger.debug("TransactionalKafkaProducerClient -- CHECKPOINT id=" + currentSequenceId);
    // when we checkpoint, we must have a transaction. open a transaction if not yet done ...
    checkAndBeginTransaction();
    if (logger.isDebugEnabled())
        logger.debug("currentSequenceId=" + currentSequenceId + ", lastSuccessfulSequenceId=" + lastSuccessfulSequenceId);
    boolean doCommit = true;
    // A gap in the sequence IDs indicates that a reset happened since the last
    // successful checkpoint; the open transaction may then duplicate data that
    // has already been committed, so check the control topic first.
    if (currentSequenceId > lastSuccessfulSequenceId + 1) {
        // the control topic must be read with 'isolation.level=read_committed'
        long committedSequenceId = getCommittedSequenceIdFromCtrlTopic();
        if (logger.isDebugEnabled())
            logger.debug("committedSequenceId=" + committedSequenceId);
        if (lastSuccessfulSequenceId < committedSequenceId) {
            if (logger.isDebugEnabled())
                logger.debug("Aborting transaction due to lastSuccessfulSequenceId < committedSequenceId");
            // If the last successful sequence ID is less than the committed
            // sequence ID, this transaction has been processed before and is
            // a duplicate. Discard this transaction.
            abortTransaction();
            doCommit = false;
            lastSuccessfulSequenceId = committedSequenceId;
        }
    }
    if (logger.isDebugEnabled())
        logger.debug("doCommit = " + doCommit);
    if (doCommit) {
        RecordMetadata lastCommittedControlRecordMetadata = commitTransaction(currentSequenceId);
        lastSuccessfulSequenceId = currentSequenceId;
        TopicPartition tp = new TopicPartition(lastCommittedControlRecordMetadata.topic(),
                                               lastCommittedControlRecordMetadata.partition());
        controlTopicInitialOffsets.put(tp, lastCommittedControlRecordMetadata.offset());
        // The 'controlTopicInitialOffsets' need not be synced back to the JCP. The CV is for reset to initial state.
        // this.startOffsetsCV.setValue (serializeObject (controlTopicInitialOffsets));
    }
    transactionInProgress.set(false);
    // save the last successful sequence ID
    if (logger.isDebugEnabled())
        logger.debug("Checkpointing lastSuccessfulSequenceId: " + lastSuccessfulSequenceId);
    checkpoint.getOutputStream().writeLong(lastSuccessfulSequenceId);
    // save the control topic offsets
    if (logger.isDebugEnabled())
        logger.debug("Checkpointing control topic offsets: " + controlTopicInitialOffsets);
    checkpoint.getOutputStream().writeObject(controlTopicInitialOffsets);
    if (!lazyTransactionBegin) {
        // start a new transaction
        checkAndBeginTransaction();
    }
}
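For context, a reset later needs to read the committed sequence ID back from that control topic, and, as the comment above notes, this only works reliably with 'isolation.level=read_committed'. Below is a minimal sketch of such a reader, assuming control records carry the sequence ID as a long value; the class and names are hypothetical, not the project's actual getCommittedSequenceIdFromCtrlTopic implementation.

import java.time.Duration;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.LongDeserializer;

class CommittedSequenceIdReader {

    /** Returns the highest sequence ID committed to the control topic at or after the given offsets. */
    static long readCommittedSequenceId(Map<TopicPartition, Long> startOffsets, String bootstrapServers) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // read_committed hides records of aborted and still-open transactions,
        // which is what makes the sequence ID read back here trustworthy
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        long committedSeqId = -1L;
        try (KafkaConsumer<Long, Long> consumer =
                new KafkaConsumer<>(props, new LongDeserializer(), new LongDeserializer())) {
            consumer.assign(startOffsets.keySet());
            startOffsets.forEach(consumer::seek);
            for (ConsumerRecord<Long, Long> rec : consumer.poll(Duration.ofSeconds(5))) {
                committedSeqId = Math.max(committedSeqId, rec.value());
            }
        }
        return committedSeqId;
    }
}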
use of org.apache.kafka.clients.producer.RecordMetadata in project brave by openzipkin.
the class TracingCallbackTest method createRecordMetadata.
RecordMetadata createRecordMetadata() {
    TopicPartition tp = new TopicPartition("foo", 0);
    long timestamp = 2340234L;
    int keySize = 3;
    int valueSize = 5;
    Long checksum = 908923L;
    return new RecordMetadata(tp, -1L, -1L, timestamp, checksum, keySize, valueSize);
}
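RecordMetadata is normally constructed only by the producer itself; tests like this build one by hand via the seven-argument constructor (whose checksum parameter newer kafka-clients versions deprecate). A hedged sketch of what such a fixture would report through the public getters; these assertions are illustrative, not part of the brave test:

RecordMetadata md = createRecordMetadata();
assert md.topic().equals("foo");
assert md.partition() == 0;
assert !md.hasOffset();            // a base offset of -1 means "no valid offset"
assert md.timestamp() == 2340234L;
assert md.serializedKeySize() == 3;
assert md.serializedValueSize() == 5;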
use of org.apache.kafka.clients.producer.RecordMetadata in project cruise-control by linkedin.
the class CruiseControlMetricsReporter method sendCruiseControlMetric.
/**
 * Send a CruiseControlMetric to the Kafka topic.
 * @param ccm the Cruise Control metric to send.
 */
public void sendCruiseControlMetric(CruiseControlMetric ccm) {
    // Use the topic name as the key for topic metrics so that the same sampler
    // will be able to collect all the information of a topic.
    String key = ccm.metricClassId() == CruiseControlMetric.MetricClassId.TOPIC_METRIC
                 ? ((TopicMetric) ccm).topic()
                 : Integer.toString(ccm.brokerId());
    ProducerRecord<String, CruiseControlMetric> producerRecord =
        new ProducerRecord<>(_cruiseControlMetricsTopic, null, ccm.time(), key, ccm);
    LOG.debug("Sending Cruise Control metric {}.", ccm);
    _producer.send(producerRecord, new Callback() {
        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
            if (e != null) {
                // pass the exception to the logger so the failure cause is not lost
                LOG.warn("Failed to send Cruise Control metric {}", ccm, e);
                _numMetricSendFailure++;
            }
        }
    });
}
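Since Callback is a single-method interface, the anonymous class above can equally be written as a lambda. A stylistic alternative, not the project's code:

_producer.send(producerRecord, (recordMetadata, e) -> {
    if (e != null) {
        LOG.warn("Failed to send Cruise Control metric {}", ccm, e);
        _numMetricSendFailure++;
    }
});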
use of org.apache.kafka.clients.producer.RecordMetadata in project cruise-control by linkedin.
the class KafkaSampleStore method storeSamples.
@Override
public void storeSamples(MetricSampler.Samples samples) {
    final AtomicInteger metricSampleCount = new AtomicInteger(0);
    for (PartitionMetricSample sample : samples.partitionMetricSamples()) {
        _producer.send(new ProducerRecord<>(_partitionMetricSampleStoreTopic, null, sample.sampleTime(), null, sample.toBytes()),
                       new Callback() {
            @Override
            public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                if (e == null) {
                    metricSampleCount.incrementAndGet();
                } else {
                    LOG.error("Failed to produce partition metric sample for {} of timestamp {} due to exception",
                              sample.entity().tp(), sample.sampleTime(), e);
                }
            }
        });
    }
    final AtomicInteger brokerMetricSampleCount = new AtomicInteger(0);
    for (BrokerMetricSample sample : samples.brokerMetricSamples()) {
        _producer.send(new ProducerRecord<>(_brokerMetricSampleStoreTopic, sample.toBytes()), new Callback() {
            @Override
            public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                if (e == null) {
                    brokerMetricSampleCount.incrementAndGet();
                } else {
                    LOG.error("Failed to produce model training sample due to exception", e);
                }
            }
        });
    }
    _producer.flush();
    LOG.debug("Stored {} partition metric samples and {} broker metric samples to Kafka",
              metricSampleCount.get(), brokerMetricSampleCount.get());
}
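The AtomicInteger counters are safe to read in the final debug line because Producer.flush() blocks until every in-flight send has completed and its callback has run. A condensed sketch of this count-and-flush pattern; 'producer' and 'payloads' are placeholders:

AtomicInteger acked = new AtomicInteger(0);
for (byte[] payload : payloads) {                 // payloads: List<byte[]>
    producer.send(new ProducerRecord<>("sample-topic", payload), (metadata, exception) -> {
        if (exception == null) {
            acked.incrementAndGet();
        }
    });
}
producer.flush(); // returns only after all callbacks above have executed
LOG.debug("acked {} of {} records", acked.get(), payloads.size());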
use of org.apache.kafka.clients.producer.RecordMetadata in project ksql by confluentinc.
the class IntegrationTestHarness method produceData.
/**
 * Produces the given records to the topic. Topic topicName will be automatically
 * created if it doesn't exist.
 * @param topicName the topic to produce to
 * @param recordsToPublish map from record key to the GenericRow value to publish
 * @param serializer the serializer for the GenericRow values
 * @param timestamp the timestamp to set on every produced record
 * @return map from record key to the RecordMetadata of the acknowledged record
 * @throws InterruptedException if waiting for an acknowledgment is interrupted
 * @throws TimeoutException if an acknowledgment does not arrive within the record future timeout
 * @throws ExecutionException if a send fails
 */
public Map<String, RecordMetadata> produceData(String topicName,
                                               Map<String, GenericRow> recordsToPublish,
                                               Serializer<GenericRow> serializer,
                                               Long timestamp)
        throws InterruptedException, TimeoutException, ExecutionException {
    createTopic(topicName);
    Properties producerConfig = properties();
    KafkaProducer<String, GenericRow> producer =
        new KafkaProducer<>(producerConfig, new StringSerializer(), serializer);
    Map<String, RecordMetadata> result = new HashMap<>();
    for (Map.Entry<String, GenericRow> recordEntry : recordsToPublish.entrySet()) {
        String key = recordEntry.getKey();
        Future<RecordMetadata> recordMetadataFuture =
            producer.send(buildRecord(topicName, timestamp, recordEntry, key));
        result.put(key, recordMetadataFuture.get(TEST_RECORD_FUTURE_TIMEOUT_MS, TimeUnit.MILLISECONDS));
    }
    producer.close();
    return result;
}
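A hedged usage sketch of this helper from a test; 'harness' and 'serializer' are assumed to come from the surrounding test setup, and the row contents are made up:

Map<String, GenericRow> rows = new HashMap<>();
rows.put("k1", new GenericRow(Arrays.asList("v1", 1L)));
rows.put("k2", new GenericRow(Arrays.asList("v2", 2L)));
Map<String, RecordMetadata> acks =
    harness.produceData("test-topic", rows, serializer, System.currentTimeMillis());
// each value is the broker's acknowledgment for that key's record
acks.forEach((key, md) ->
    System.out.println(key + " -> " + md.topic() + "-" + md.partition() + "@" + md.offset()));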