Use of org.apache.spark.streaming.kafka010.CanCommitOffsets in project lambda-arch by apssouza22.
From the class TrafficOffsetCommitCallback, the method commitOffset:
/**
 * Commit the acknowledgement to Kafka after processing has completed.
 * This is our fault-tolerance implementation.
 *
 * @param directKafkaStream the direct Kafka input stream whose offsets are committed
 */
private void commitOffset(JavaInputDStream<ConsumerRecord<String, IoTData>> directKafkaStream) {
    directKafkaStream.foreachRDD((JavaRDD<ConsumerRecord<String, IoTData>> trafficRdd) -> {
        if (!trafficRdd.isEmpty()) {
            // Extract the offset ranges covered by this micro-batch.
            OffsetRange[] offsetRanges = ((HasOffsetRanges) trafficRdd.rdd()).offsetRanges();
            // The direct stream implements CanCommitOffsets, so cast it and commit the offsets asynchronously.
            CanCommitOffsets canCommitOffsets = (CanCommitOffsets) directKafkaStream.inputDStream();
            canCommitOffsets.commitAsync(offsetRanges, new TrafficOffsetCommitCallback());
        }
    });
}
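
The callback passed to commitAsync is not shown above. Below is a minimal sketch of what such a class could look like, assuming it only logs the result of the asynchronous commit (the actual TrafficOffsetCommitCallback in lambda-arch may differ). It implements Kafka's OffsetCommitCallback interface, whose onComplete method receives the committed offsets and a non-null exception if the commit failed. Note that committing offsets back to Kafka manually like this generally assumes enable.auto.commit is set to false in the consumer configuration.

import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;
import org.apache.log4j.Logger;

// Hypothetical sketch; the real TrafficOffsetCommitCallback may differ.
final class TrafficOffsetCommitCallback implements OffsetCommitCallback {

    private static final Logger logger = Logger.getLogger(TrafficOffsetCommitCallback.class);

    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            // Commit failed: log it; the next batch's commitAsync will effectively retry.
            logger.error("Kafka offset commit failed", exception);
        } else {
            logger.info("Kafka offsets committed: " + offsets);
        }
    }
}

Because commitAsync is asynchronous, a failed commit is not fatal: at worst the same records are reprocessed after a restart, which is the at-least-once guarantee this fault-tolerance approach provides.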