Use of org.corfudb.format.Types.TrimEntry in project CorfuDB by CorfuDB.
In the class StreamLogFiles, method trimLogFile:
private void trimLogFile(String filePath, Set<Long> pendingTrim) throws IOException {
    FileChannel fc = FileChannel.open(FileSystems.getDefault().getPath(filePath + ".copy"),
            EnumSet.of(StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE, StandardOpenOption.SPARSE));
    FileChannel fc2 = FileChannel.open(FileSystems.getDefault().getPath(getTrimmedFilePath(filePath)),
            EnumSet.of(StandardOpenOption.APPEND));

    CompactedEntry log = getCompactedEntries(filePath, pendingTrim);
    LogHeader header = log.getLogHeader();
    Collection<LogEntry> compacted = log.getEntries();

    writeHeader(fc, header.getVersion(), header.getVerifyChecksum());

    for (LogEntry entry : compacted) {
        ByteBuffer record = getByteBufferWithMetaData(entry);
        ByteBuffer recordBuf = ByteBuffer.allocate(Short.BYTES // Delimiter
                + record.capacity());
        recordBuf.putShort(RECORD_DELIMITER);
        recordBuf.put(record.array());
        recordBuf.flip();
        fc.write(recordBuf);
    }

    fc.force(true);
    fc.close();

    try (OutputStream outputStream = Channels.newOutputStream(fc2)) {
        // TODO(Maithem) How do we verify that the compacted file is correct?
        for (Long address : pendingTrim) {
            TrimEntry entry = TrimEntry.newBuilder()
                    .setChecksum(getChecksum(address))
                    .setAddress(address)
                    .build();
            entry.writeDelimitedTo(outputStream);
        }
        outputStream.flush();
        fc2.force(true);
    }
    fc2.close();

    Files.move(Paths.get(filePath + ".copy"), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE);

    // Force the reload of the new segment
    writeChannels.remove(filePath);
}
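TrimEntry is a protocol-buffers message, so each record in the trimmed-addresses file is length-delimited: writeDelimitedTo emits a varint length prefix followed by the serialized message, and parseDelimitedFrom reads one record back. A minimal round-trip sketch under those assumptions (field types are inferred from the builder calls above; the helper class and file are hypothetical, not part of the project):

    import java.io.*;
    import org.corfudb.format.Types.TrimEntry;

    class TrimEntryRoundTrip {
        // Append one TrimEntry to the file as a length-delimited record.
        // Assumes address is a long and checksum is an int, as suggested by
        // the builder calls in trimLogFile.
        static void append(File file, long address, int checksum) throws IOException {
            try (OutputStream out = new FileOutputStream(file, true /* append */)) {
                TrimEntry.newBuilder()
                        .setAddress(address)
                        .setChecksum(checksum)
                        .build()
                        .writeDelimitedTo(out); // varint length prefix + message bytes
            }
        }

        // Read every TrimEntry back; parseDelimitedFrom returns null at end of stream.
        static void readAll(File file) throws IOException {
            try (InputStream in = new FileInputStream(file)) {
                TrimEntry entry;
                while ((entry = TrimEntry.parseDelimitedFrom(in)) != null) {
                    System.out.println(entry.getAddress());
                }
            }
        }
    }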
Use of org.corfudb.format.Types.TrimEntry in project CorfuDB by CorfuDB.
In the class StreamLogFiles, method loadTrimAddresses:
private void loadTrimAddresses(SegmentHandle sh) throws IOException {
    long trimmedSize;
    long pendingTrimSize;

    // TODO(Maithem) compute checksums and refactor
    try (MultiReadWriteLock.AutoCloseableLock ignored =
                 segmentLocks.acquireReadLock(sh.getSegment())) {
        trimmedSize = sh.getTrimmedChannel().size();
        pendingTrimSize = sh.getPendingTrimChannel().size();
    }

    FileChannel fcTrimmed = getChannel(getTrimmedFilePath(sh.getFileName()), true);
    FileChannel fcPending = getChannel(getPendingTrimsFilePath(sh.getFileName()), true);

    if (fcTrimmed == null) {
        return;
    }

    InputStream inputStream = Channels.newInputStream(fcTrimmed);
    while (fcTrimmed.position() < trimmedSize) {
        TrimEntry trimEntry = TrimEntry.parseDelimitedFrom(inputStream);
        sh.getTrimmedAddresses().add(trimEntry.getAddress());
    }
    inputStream.close();
    fcTrimmed.close();

    if (fcPending == null) {
        return;
    }

    inputStream = Channels.newInputStream(fcPending);
    while (fcPending.position() < pendingTrimSize) {
        TrimEntry trimEntry = TrimEntry.parseDelimitedFrom(inputStream);
        sh.getPendingTrims().add(trimEntry.getAddress());
    }
    inputStream.close();
    fcPending.close();
}
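The two read loops above are identical except for the channel, the size limit, and the target collection. A hypothetical refactoring sketch of a helper that could live inside StreamLogFiles (the name and signature are illustrative, not part of the project):

    // Reads length-delimited TrimEntry records from the channel until `size`
    // bytes have been consumed and adds each address to `target`.
    private static void readTrimAddresses(FileChannel channel, long size,
                                          Collection<Long> target) throws IOException {
        InputStream inputStream = Channels.newInputStream(channel);
        while (channel.position() < size) {
            TrimEntry trimEntry = TrimEntry.parseDelimitedFrom(inputStream);
            target.add(trimEntry.getAddress());
        }
        inputStream.close();
        channel.close();
    }

With such a helper, loadTrimAddresses would reduce to two calls, one for the trimmed file and one for the pending-trims file.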
Use of org.corfudb.format.Types.TrimEntry in project CorfuDB by CorfuDB.
In the class StreamLogFiles, method trim:
@Override
public void trim(long address) {
    SegmentHandle handle = getSegmentHandleForAddress(address);
    if (!handle.getKnownAddresses().containsKey(address)
            || handle.getPendingTrims().contains(address)) {
        return;
    }

    TrimEntry entry = TrimEntry.newBuilder()
            .setChecksum(getChecksum(address))
            .setAddress(address)
            .build();

    // TODO(Maithem) possibly move this to SegmentHandle. Do we need to close and flush?
    OutputStream outputStream = Channels.newOutputStream(handle.getPendingTrimChannel());
    try {
        entry.writeDelimitedTo(outputStream);
        outputStream.flush();
        handle.pendingTrims.add(address);
        channelsToSync.add(handle.getPendingTrimChannel());
    } catch (IOException e) {
        log.warn("Exception while writing a trim entry {} : {}", address, e.toString());
    }
}
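trim only records intent: it appends a TrimEntry to the segment's pending-trims file and queues the channel for syncing, while the actual compaction happens later when trimLogFile rewrites the segment without the pending addresses. A hypothetical caller sketch (the StreamLog interface declaring trim(long), implied by the @Override above, and its package path are assumptions):

    import org.corfudb.infrastructure.log.StreamLog;

    public class TrimRangeExample {
        // Mark every address in [start, end) as trimmed. trim() skips addresses
        // the segment does not know about and addresses that are already pending,
        // so repeated calls are harmless.
        static void trimRange(StreamLog streamLog, long start, long end) {
            for (long address = start; address < end; address++) {
                streamLog.trim(address);
            }
        }
    }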