Use of io.confluent.connect.storage.format.RecordWriter in the project kafka-connect-storage-cloud by confluentinc: the writeRecord method of the TopicPartitionWriter class.
private void writeRecord(SinkRecord record) {
  currentOffset = record.kafkaOffset();
  if (!startOffsets.containsKey(currentEncodedPartition)) {
    log.trace("Setting writer's start offset for '{}' to {}", currentEncodedPartition, currentOffset);
    startOffsets.put(currentEncodedPartition, currentOffset);
  }
  RecordWriter writer = getWriter(record, currentEncodedPartition);
  writer.write(record);
  ++recordCount;
}
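For context, getWriter (not shown above) typically creates a RecordWriter lazily the first time an encoded partition is seen and caches it in the writers map, delegating to the storage format's RecordWriterProvider. A minimal sketch, assuming a hypothetical writerProvider field, a connectorConfig field, and a getCommitFilename helper, none of which appear in the snippet above:

// Hypothetical sketch of lazy writer creation; the actual TopicPartitionWriter
// implementation differs in details (e.g. schema and rotation handling).
private RecordWriter getWriter(SinkRecord record, String encodedPartition) {
  if (writers.containsKey(encodedPartition)) {
    return writers.get(encodedPartition);
  }
  // Build the object key for this encoded partition and cache a new writer for it.
  String commitFilename = getCommitFilename(encodedPartition);  // assumed helper
  RecordWriter writer = writerProvider.getRecordWriter(connectorConfig, commitFilename);
  writers.put(encodedPartition, writer);
  return writer;
}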
Another use of RecordWriter in the same class: the commitFile method.
private void commitFile(String encodedPartition) {
  if (!startOffsets.containsKey(encodedPartition)) {
    log.warn("Tried to commit file with missing starting offset partition: {}. Ignoring.", encodedPartition);
    return;
  }
  if (writers.containsKey(encodedPartition)) {
    RecordWriter writer = writers.get(encodedPartition);
    // Commits the file and closes the underlying output stream.
    writer.commit();
    writers.remove(encodedPartition);
    log.debug("Removed writer for '{}'", encodedPartition);
  }
  startOffsets.remove(encodedPartition);
}
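Both methods rely on the RecordWriter contract: write appends a record to the current file, commit finalizes the file (in this connector it also closes the underlying output stream), and close releases resources. A minimal sketch of an implementation of that interface, assuming a hypothetical newline-delimited text format that is not part of the actual connector:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;

import io.confluent.connect.storage.format.RecordWriter;

// Hypothetical writer that appends each record's string value as one line.
public class LineRecordWriter implements RecordWriter {
  private final OutputStream out;

  public LineRecordWriter(OutputStream out) {
    this.out = out;
  }

  @Override
  public void write(SinkRecord record) {
    try {
      out.write(String.valueOf(record.value()).getBytes(StandardCharsets.UTF_8));
      out.write('\n');
    } catch (IOException e) {
      throw new ConnectException(e);
    }
  }

  @Override
  public void commit() {
    // Finalize the file; in this sketch that simply means closing the stream.
    close();
  }

  @Override
  public void close() {
    try {
      out.close();
    } catch (IOException e) {
      throw new ConnectException(e);
    }
  }
}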