Example usage of org.corfudb.format.Types.LogHeader in the CorfuDB project (CorfuDB/CorfuDB): class StreamLogFiles, method trimLogFile.
/**
 * Compact a log segment by rewriting it without the trimmed addresses.
 *
 * <p>The compacted entries are written to a temporary "{@code .copy}" file,
 * the trimmed addresses are appended to the trim log, and finally the copy
 * is atomically moved over the original segment.
 *
 * @param filePath    path of the segment file to trim
 * @param pendingTrim addresses to remove from the segment
 * @throws IOException if any file operation fails
 */
private void trimLogFile(String filePath, Set<Long> pendingTrim) throws IOException {
    // Read back everything that survives the trim before touching any output file.
    CompactedEntry log = getCompactedEntries(filePath, pendingTrim);
    LogHeader header = log.getLogHeader();
    Collection<LogEntry> compacted = log.getEntries();

    // try-with-resources guarantees the channel is released even if a write
    // fails (the original code leaked both channels on any exception, and
    // opened the trim-log channel before compaction, leaking it if
    // getCompactedEntries threw).
    try (FileChannel fc = FileChannel.open(
            FileSystems.getDefault().getPath(filePath + ".copy"),
            EnumSet.of(StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE, StandardOpenOption.SPARSE))) {
        writeHeader(fc, header.getVersion(), header.getVerifyChecksum());
        for (LogEntry entry : compacted) {
            ByteBuffer record = getByteBufferWithMetaData(entry);
            // Each record is prefixed with a short delimiter marker.
            ByteBuffer recordBuf = ByteBuffer.allocate(Short.BYTES + record.capacity());
            recordBuf.putShort(RECORD_DELIMITER);
            recordBuf.put(record.array());
            recordBuf.flip();
            fc.write(recordBuf);
        }
        // Flush the compacted copy to stable storage before the trim log is updated.
        fc.force(true);
    }

    // Append the trimmed addresses (with checksums) to the trim log. Closing
    // the outer stream also closes the underlying channel, so the original's
    // extra fc2.close() after the try block was redundant.
    try (FileChannel fc2 = FileChannel.open(
            FileSystems.getDefault().getPath(getTrimmedFilePath(filePath)),
            EnumSet.of(StandardOpenOption.APPEND));
         OutputStream outputStream = Channels.newOutputStream(fc2)) {
        // Todo(Maithem) How do we verify that the compacted file is correct?
        for (Long address : pendingTrim) {
            TrimEntry entry = TrimEntry.newBuilder()
                    .setChecksum(getChecksum(address))
                    .setAddress(address)
                    .build();
            entry.writeDelimitedTo(outputStream);
        }
        outputStream.flush();
        fc2.force(true);
    }

    // Atomically replace the original segment with the compacted copy.
    Files.move(Paths.get(filePath + ".copy"), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE);
    // Force the reload of the new segment
    writeChannels.remove(filePath);
}
Example usage of org.corfudb.format.Types.LogHeader in the CorfuDB project: class StreamLogFiles, method writeHeader.
/**
 * Write the header for a Corfu log file.
 *
 * @param fc      the file channel the header is written to
 * @param version the version number recorded in the header
 * @param verify  whether checksum verification is enabled for this log
 * @throws IOException if the header cannot be written to the channel
 */
public static void writeHeader(FileChannel fc, int version, boolean verify) throws IOException {
    LogHeader logHeader = LogHeader.newBuilder()
            .setVersion(version)
            .setVerifyChecksum(verify)
            .build();
    ByteBuffer serialized = getByteBufferWithMetaData(logHeader);
    fc.write(serialized);
    // Flush to stable storage so the header survives a crash.
    fc.force(true);
}
Example usage of org.corfudb.format.Types.LogHeader in the CorfuDB project: class StreamLogFiles, method verifyLogs.
/**
 * Verify every log file under the log directory: check the header checksum,
 * the log version, and (unless verification is disabled) that the file was
 * generated with checksums enabled.
 *
 * @throws DataCorruptionException if a header checksum mismatch is detected
 * @throws RuntimeException        on version mismatch, missing checksums, or I/O failure
 */
private void verifyLogs() {
    String[] extension = {"log"};
    File dir = new File(logDir);

    if (!dir.exists()) {
        return;
    }

    Collection<File> files = FileUtils.listFiles(dir, extension, true);
    for (File file : files) {
        // try-with-resources: the original leaked the stream/channel whenever a
        // verification failure threw before the explicit close() calls.
        try (FileInputStream fIn = new FileInputStream(file);
             FileChannel fc = fIn.getChannel()) {
            ByteBuffer metadataBuf = ByteBuffer.allocate(METADATA_SIZE);
            fc.read(metadataBuf);
            metadataBuf.flip();
            Metadata metadata = Metadata.parseFrom(metadataBuf.array());

            ByteBuffer headerBuf = ByteBuffer.allocate(metadata.getLength());
            fc.read(headerBuf);
            headerBuf.flip();
            LogHeader header = LogHeader.parseFrom(headerBuf.array());

            if (metadata.getChecksum() != getChecksum(header.toByteArray())) {
                log.error("Checksum mismatch detected while trying to read header for logfile {}", file);
                throw new DataCorruptionException();
            }
            if (header.getVersion() != VERSION) {
                // Bug fix: String.format uses %s placeholders, not SLF4J-style {};
                // the original messages never contained the formatted values.
                String msg = String.format("Log version %s for %s should match the logunit log version %s",
                        header.getVersion(), file.getAbsoluteFile(), VERSION);
                throw new RuntimeException(msg);
            }
            if (!noVerify && !header.getVerifyChecksum()) {
                String msg = String.format("Log file %s not generated with checksums, can't verify!",
                        file.getAbsoluteFile());
                throw new RuntimeException(msg);
            }
        } catch (IOException e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }
}
Example usage of org.corfudb.format.Types.LogHeader in the CorfuDB project: class logReader, method processHeader.
// Reads the file metadata block and the log header from the start of the input
// channel, optionally copying the raw bytes through to the output channel, and
// returns the parsed fields wrapped in a (project-local) logHeader holder.
// NOTE(review): a short read (r < buffer size) is not distinguished from a full
// read here — only r > 0 is checked; presumably the file is assumed well-formed.
final logHeader processHeader() throws IOException {
fileChannelIn.position(0);
// Fixed-size metadata block precedes the variable-length header.
ByteBuffer mdBuffer = ByteBuffer.allocate(metadataSize);
int r = fileChannelIn.read(mdBuffer);
mdBuffer.flip();
if (fileChannelOut != null) {
// Pass the raw metadata bytes through to the output file unchanged.
fileChannelOut.write(mdBuffer);
}
if (r > 0) {
logHeader header = new logHeader();
Metadata md = Metadata.parseFrom(mdBuffer.array());
// Metadata records the length of the serialized LogHeader that follows.
int logHeaderSize = md.getLength();
header.setChecksum(md.getChecksum());
header.setLength(md.getLength());
ByteBuffer lhBuffer = ByteBuffer.allocate(logHeaderSize);
r = fileChannelIn.read(lhBuffer);
lhBuffer.flip();
if (fileChannelOut != null) {
// Pass the raw header bytes through as well.
fileChannelOut.write(lhBuffer);
}
if (r > 0) {
LogHeader lh = LogHeader.parseFrom(lhBuffer.array());
header.setVersion(lh.getVersion());
header.setVerifyChecksum(lh.getVerifyChecksum());
}
// If the header bytes could not be read, version/verifyChecksum keep
// their defaults; only checksum/length from the metadata are populated.
return header;
}
// Empty/unreadable file: return a default-initialized header.
return new logHeader();
}
Example usage of org.corfudb.format.Types.LogHeader in the CorfuDB project: class StreamLogFiles, method getCompactedEntries.
/**
 * Read a segment file and return its header plus every log entry whose global
 * address is NOT in {@code pendingTrim}, preserving on-disk order (last write
 * wins for duplicate addresses via the LinkedHashMap).
 *
 * @param filePath    path of the segment file to read
 * @param pendingTrim addresses to exclude from the result
 * @return the segment header and the surviving entries
 * @throws IOException             if the file cannot be read
 * @throws DataCorruptionException on checksum mismatch or unparseable records
 */
private CompactedEntry getCompactedEntries(String filePath, Set<Long> pendingTrim) throws IOException {
    LogHeader header;
    ByteBuffer o;

    // try-with-resources: the original leaked the channel if any read or
    // parse before fc.close() threw.
    try (FileChannel fc = getChannel(filePath, true)) {
        // Skip the header: metadata block first, then the header it describes.
        ByteBuffer headerMetadataBuf = ByteBuffer.allocate(METADATA_SIZE);
        fc.read(headerMetadataBuf);
        headerMetadataBuf.flip();
        Metadata headerMetadata = Metadata.parseFrom(headerMetadataBuf.array());

        ByteBuffer headerBuf = ByteBuffer.allocate(headerMetadata.getLength());
        fc.read(headerBuf);
        headerBuf.flip();
        header = LogHeader.parseFrom(headerBuf.array());

        // Slurp the rest of the segment into memory in one read.
        o = ByteBuffer.allocate((int) fc.size() - (int) fc.position());
        fc.read(o);
    }
    o.flip();

    LinkedHashMap<Long, LogEntry> compacted = new LinkedHashMap<>();
    while (o.hasRemaining()) {
        //Skip delimiter
        o.getShort();
        byte[] metadataBuf = new byte[METADATA_SIZE];
        o.get(metadataBuf);
        try {
            Metadata metadata = Metadata.parseFrom(metadataBuf);
            byte[] logEntryBuf = new byte[metadata.getLength()];
            o.get(logEntryBuf);
            LogEntry entry = LogEntry.parseFrom(logEntryBuf);
            if (!noVerify) {
                if (metadata.getChecksum() != getChecksum(entry.toByteArray())) {
                    log.error("Checksum mismatch detected while trying to read address {}",
                            entry.getGlobalAddress());
                    throw new DataCorruptionException();
                }
            }
            // Keep only entries that are not being trimmed.
            if (!pendingTrim.contains(entry.getGlobalAddress())) {
                compacted.put(entry.getGlobalAddress(), entry);
            }
        } catch (InvalidProtocolBufferException e) {
            // NOTE(review): the cause is dropped here — confirm whether
            // DataCorruptionException offers a (Throwable) constructor.
            throw new DataCorruptionException();
        }
    }
    return new CompactedEntry(header, compacted.values());
}
Aggregations