Use of org.opendaylight.controller.cluster.io.SharedFileBackedOutputStream in project controller by opendaylight.
Example: class AbstractLeader, method getEntriesToSend. When a single replicated log entry exceeds the maximum payload size for one AppendEntries message, the method serializes the AppendEntries into a SharedFileBackedOutputStream once and shares that stream across all followers that need the same entry sliced into chunks.
private List<ReplicatedLogEntry> getEntriesToSend(final FollowerLogInformation followerLogInfo,
        final ActorSelection followerActor) {
    // Try to get all the entries in the journal but not exceeding the max data size for a single
    // AppendEntries message.
    int maxEntries = (int) context.getReplicatedLog().size();
    final int maxDataSize = context.getConfigParams().getSnapshotChunkSize();
    final long followerNextIndex = followerLogInfo.getNextIndex();
    List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex,
            maxEntries, maxDataSize);
    // If the first entry's size exceeds the max data size threshold, it will be returned from the call above. If
    // that is the case, then we need to slice it into smaller chunks.
    if (!(entries.size() == 1 && entries.get(0).getData().size() > maxDataSize)) {
        // Don't need to slice.
        return entries;
    }

    log.debug("{}: Log entry size {} exceeds max payload size {}", logName(),
            entries.get(0).getData().size(), maxDataSize);

    // If an AppendEntries has already been serialized for the log index then reuse the
    // SharedFileBackedOutputStream.
    final Long logIndex = entries.get(0).getIndex();
    SharedFileBackedOutputStream fileBackedStream = sharedSerializedAppendEntriesStreams.get(logIndex);
    if (fileBackedStream == null) {
        fileBackedStream = context.getFileBackedOutputStreamFactory().newSharedInstance();

        final AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
                getLogEntryIndex(followerNextIndex - 1), getLogEntryTerm(followerNextIndex - 1),
                entries, context.getCommitIndex(), getReplicatedToAllIndex(),
                context.getPayloadVersion());

        log.debug("{}: Serializing {} for slicing for follower {}", logName(), appendEntries,
                followerLogInfo.getId());

        try (ObjectOutputStream out = new ObjectOutputStream(fileBackedStream)) {
            out.writeObject(appendEntries);
        } catch (IOException e) {
            log.error("{}: Error serializing {}", logName(), appendEntries, e);
            fileBackedStream.cleanup();
            return Collections.emptyList();
        }

        sharedSerializedAppendEntriesStreams.put(logIndex, fileBackedStream);

        fileBackedStream.setOnCleanupCallback(index -> {
            log.debug("{}: On SharedFileBackedOutputStream cleanup for index {}", logName(), index);
            sharedSerializedAppendEntriesStreams.remove(index);
        }, logIndex);
    } else {
        log.debug("{}: Reusing SharedFileBackedOutputStream for follower {}", logName(),
                followerLogInfo.getId());
        fileBackedStream.incrementUsageCount();
    }
log.debug("{}: Slicing stream for index {}, follower {}", logName(), logIndex, followerLogInfo.getId());
// Record that slicing is in progress for the follower.
followerLogInfo.setSlicedLogEntryIndex(logIndex);
final FollowerIdentifier identifier = new FollowerIdentifier(followerLogInfo.getId());
appendEntriesMessageSlicer.slice(SliceOptions.builder().identifier(identifier).fileBackedOutputStream(fileBackedStream).sendTo(followerActor).replyTo(actor()).onFailureCallback(failure -> {
log.error("{}: Error slicing AppendEntries for follower {}", logName(), followerLogInfo.getId(), failure);
followerLogInfo.setSlicedLogEntryIndex(FollowerLogInformation.NO_INDEX);
}).build());
return Collections.emptyList();
}
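
The method relies on the reference-counting behavior of SharedFileBackedOutputStream: the creator holds the initial reference, each follower that reuses the cached stream calls incrementUsageCount(), and the underlying resource is released only after the last slicing operation finishes, at which point the cleanup callback evicts the cache entry. Below is a minimal sketch of that lifecycle; SharedStream, getOrCreate, and the demo names are hypothetical stand-ins for illustration, not the real SharedFileBackedOutputStream API.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for SharedFileBackedOutputStream, illustrating only
// the usage-count and cleanup-callback lifecycle, not the stream I/O.
class SharedStream {
    private int usageCount = 1;              // the creator holds the first reference
    private Runnable onCleanup = () -> { };

    void incrementUsageCount() {
        usageCount++;
    }

    void setOnCleanupCallback(Runnable callback) {
        onCleanup = callback;
    }

    // Each slicing operation calls cleanup() when it finishes; the shared
    // resource is released only when the last user is done.
    void cleanup() {
        if (--usageCount == 0) {
            onCleanup.run();                 // e.g. remove the cache entry
        }
    }
}

public class SharedStreamDemo {
    // Mirrors the get-or-create logic in getEntriesToSend.
    static SharedStream getOrCreate(Map<Long, SharedStream> cache, long logIndex) {
        SharedStream stream = cache.get(logIndex);
        if (stream == null) {
            stream = new SharedStream();
            cache.put(logIndex, stream);
            stream.setOnCleanupCallback(() -> cache.remove(logIndex));
        } else {
            stream.incrementUsageCount();
        }
        return stream;
    }

    public static void main(String[] args) {
        Map<Long, SharedStream> cache = new HashMap<>();
        SharedStream first = getOrCreate(cache, 42L);   // serialize-once path
        SharedStream second = getOrCreate(cache, 42L);  // reuse path, same stream

        first.cleanup();                                // first slice done
        System.out.println(cache.containsKey(42L));     // true - still shared
        second.cleanup();                               // last slice done
        System.out.println(cache.containsKey(42L));     // false - evicted
    }
}

Because the stream is file-backed, an oversized AppendEntries can spill to disk rather than occupy heap while slicing is in progress, and sharing one stream avoids re-serializing the same oversized entry once per follower.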