
Example 1 with LogHeader

Use of org.corfudb.format.Types.LogHeader in project CorfuDB by CorfuDB.

From the class StreamLogFiles, the method trimLogFile:

private void trimLogFile(String filePath, Set<Long> pendingTrim) throws IOException {
    FileChannel fc = FileChannel.open(FileSystems.getDefault().getPath(filePath + ".copy"), EnumSet.of(StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.SPARSE));
    FileChannel fc2 = FileChannel.open(FileSystems.getDefault().getPath(getTrimmedFilePath(filePath)), EnumSet.of(StandardOpenOption.APPEND));
    CompactedEntry log = getCompactedEntries(filePath, pendingTrim);
    LogHeader header = log.getLogHeader();
    Collection<LogEntry> compacted = log.getEntries();
    writeHeader(fc, header.getVersion(), header.getVerifyChecksum());
    for (LogEntry entry : compacted) {
        ByteBuffer record = getByteBufferWithMetaData(entry);
        // Allocate room for the delimiter plus the record with its metadata
        ByteBuffer recordBuf = ByteBuffer.allocate(Short.BYTES + record.capacity());
        recordBuf.putShort(RECORD_DELIMITER);
        recordBuf.put(record.array());
        recordBuf.flip();
        fc.write(recordBuf);
    }
    fc.force(true);
    fc.close();
    try (OutputStream outputStream = Channels.newOutputStream(fc2)) {
        // Todo(Maithem) How do we verify that the compacted file is correct?
        for (Long address : pendingTrim) {
            TrimEntry entry = TrimEntry.newBuilder().setChecksum(getChecksum(address)).setAddress(address).build();
            entry.writeDelimitedTo(outputStream);
        }
        outputStream.flush();
        fc2.force(true);
    }
    fc2.close();
    Files.move(Paths.get(filePath + ".copy"), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE);
    // Force the reload of the new segment
    writeChannels.remove(filePath);
}
Also used : FileChannel(java.nio.channels.FileChannel) OutputStream(java.io.OutputStream) AtomicLong(java.util.concurrent.atomic.AtomicLong) TrimEntry(org.corfudb.format.Types.TrimEntry) ByteBuffer(java.nio.ByteBuffer) LogHeader(org.corfudb.format.Types.LogHeader) LogEntry(org.corfudb.format.Types.LogEntry)
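
The trimmed-addresses file written above stores each TrimEntry with writeDelimitedTo. A minimal sketch of how such a file could be read back later, using the standard protobuf parseDelimitedFrom; the helper name readTrimmedAddresses is hypothetical and not part of the example above:

private Set<Long> readTrimmedAddresses(String filePath) throws IOException {
    Set<Long> trimmed = new HashSet<>();
    try (InputStream in = Files.newInputStream(Paths.get(getTrimmedFilePath(filePath)))) {
        // TrimEntry messages were written with writeDelimitedTo, so read them back the same way
        TrimEntry entry;
        while ((entry = TrimEntry.parseDelimitedFrom(in)) != null) {
            trimmed.add(entry.getAddress());
        }
    }
    return trimmed;
}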

Example 2 with LogHeader

Use of org.corfudb.format.Types.LogHeader in project CorfuDB by CorfuDB.

From the class StreamLogFiles, the method writeHeader:

/**
 * Write the header for a Corfu log file.
 *
 * @param fc      The file channel to use.
 * @param version The version number to append to the header.
 * @param verify  Checksum verify flag.
 * @throws IOException if the header cannot be written to the file channel.
 */
public static void writeHeader(FileChannel fc, int version, boolean verify) throws IOException {
    LogHeader header = LogHeader.newBuilder().setVersion(version).setVerifyChecksum(verify).build();
    ByteBuffer buf = getByteBufferWithMetaData(header);
    fc.write(buf);
    fc.force(true);
}
Also used : ByteBuffer(java.nio.ByteBuffer) LogHeader(org.corfudb.format.Types.LogHeader)
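
getByteBufferWithMetaData is not shown in these examples. A minimal sketch of what it plausibly does, assuming it accepts any generated protobuf message and prepends a Metadata record (checksum plus length) that readers later parse back, as Examples 3 and 5 do; the real StreamLogFiles implementation may differ:

private static ByteBuffer getByteBufferWithMetaData(AbstractMessage message) {
    byte[] data = message.toByteArray();
    Metadata metadata = Metadata.newBuilder()
            // Checksum over the serialized message, later compared by readers
            .setChecksum(getChecksum(data))
            // Payload length, later read back via metadata.getLength()
            .setLength(data.length)
            .build();
    byte[] metadataBytes = metadata.toByteArray();
    // Note: the real code relies on Metadata serializing to the fixed METADATA_SIZE; not enforced here
    ByteBuffer buf = ByteBuffer.allocate(metadataBytes.length + data.length);
    buf.put(metadataBytes);
    buf.put(data);
    buf.flip();
    return buf;
}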

Example 3 with LogHeader

Use of org.corfudb.format.Types.LogHeader in project CorfuDB by CorfuDB.

From the class StreamLogFiles, the method verifyLogs:

private void verifyLogs() {
    String[] extension = { "log" };
    File dir = new File(logDir);
    if (dir.exists()) {
        Collection<File> files = FileUtils.listFiles(dir, extension, true);
        for (File file : files) {
            try {
                FileInputStream fIn = new FileInputStream(file);
                FileChannel fc = fIn.getChannel();
                ByteBuffer metadataBuf = ByteBuffer.allocate(METADATA_SIZE);
                fc.read(metadataBuf);
                metadataBuf.flip();
                Metadata metadata = Metadata.parseFrom(metadataBuf.array());
                ByteBuffer headerBuf = ByteBuffer.allocate(metadata.getLength());
                fc.read(headerBuf);
                headerBuf.flip();
                LogHeader header = LogHeader.parseFrom(headerBuf.array());
                fc.close();
                fIn.close();
                if (metadata.getChecksum() != getChecksum(header.toByteArray())) {
                    log.error("Checksum mismatch detected while trying to read header for logfile {}", file);
                    throw new DataCorruptionException();
                }
                if (header.getVersion() != VERSION) {
                    String msg = String.format("Log version {} for {} should match the logunit log version {}", header.getVersion(), file.getAbsoluteFile(), VERSION);
                    throw new RuntimeException(msg);
                }
                if (!noVerify && !header.getVerifyChecksum()) {
                    String msg = String.format("Log file {} not generated with checksums, can't verify!", file.getAbsoluteFile());
                    throw new RuntimeException(msg);
                }
            } catch (IOException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
        }
    }
}
Also used : FileChannel(java.nio.channels.FileChannel) IMetadata(org.corfudb.protocols.wireprotocol.IMetadata) Metadata(org.corfudb.format.Types.Metadata) ByteString(com.google.protobuf.ByteString) DataCorruptionException(org.corfudb.runtime.exceptions.DataCorruptionException) IOException(java.io.IOException) File(java.io.File) ByteBuffer(java.nio.ByteBuffer) FileInputStream(java.io.FileInputStream) LogHeader(org.corfudb.format.Types.LogHeader)
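
verifyLogs compares the checksum stored in Metadata against getChecksum over the serialized header, but the helper itself is not part of these examples. A minimal sketch, assuming a plain CRC32 over the raw bytes (the actual CorfuDB checksum may use a different algorithm, for example CRC32C):

// Illustrative only: checksum of serialized bytes
static int getChecksum(byte[] bytes) {
    CRC32 crc = new CRC32();
    crc.update(bytes);
    return (int) crc.getValue();
}

// Overload used when trimming (Example 1): checksum of a log address
static int getChecksum(long address) {
    ByteBuffer buf = ByteBuffer.allocate(Long.BYTES);
    buf.putLong(address);
    return getChecksum(buf.array());
}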

Example 4 with LogHeader

Use of org.corfudb.format.Types.LogHeader in project CorfuDB by CorfuDB.

From the class logReader, the method processHeader:

final logHeader processHeader() throws IOException {
    fileChannelIn.position(0);
    ByteBuffer mdBuffer = ByteBuffer.allocate(metadataSize);
    int r = fileChannelIn.read(mdBuffer);
    mdBuffer.flip();
    if (fileChannelOut != null) {
        fileChannelOut.write(mdBuffer);
    }
    if (r > 0) {
        logHeader header = new logHeader();
        Metadata md = Metadata.parseFrom(mdBuffer.array());
        int logHeaderSize = md.getLength();
        header.setChecksum(md.getChecksum());
        header.setLength(md.getLength());
        ByteBuffer lhBuffer = ByteBuffer.allocate(logHeaderSize);
        r = fileChannelIn.read(lhBuffer);
        lhBuffer.flip();
        if (fileChannelOut != null) {
            fileChannelOut.write(lhBuffer);
        }
        if (r > 0) {
            LogHeader lh = LogHeader.parseFrom(lhBuffer.array());
            header.setVersion(lh.getVersion());
            header.setVerifyChecksum(lh.getVerifyChecksum());
        }
        return header;
    }
    return new logHeader();
}
Also used : Metadata(org.corfudb.format.Types.Metadata) ByteBuffer(java.nio.ByteBuffer) LogHeader(org.corfudb.format.Types.LogHeader)
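
The logHeader returned here is a plain holder, distinct from the protobuf LogHeader, and its definition is not included in these examples. A minimal sketch consistent with the setters used above; the field types are assumptions:

// Sketch of the logHeader holder used by logReader; the real class may differ
class logHeader {
    private long checksum;          // from Metadata.getChecksum()
    private int length;             // from Metadata.getLength()
    private int version;            // from LogHeader.getVersion()
    private boolean verifyChecksum; // from LogHeader.getVerifyChecksum()

    void setChecksum(long checksum) { this.checksum = checksum; }
    void setLength(int length) { this.length = length; }
    void setVersion(int version) { this.version = version; }
    void setVerifyChecksum(boolean verifyChecksum) { this.verifyChecksum = verifyChecksum; }
}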

Example 5 with LogHeader

Use of org.corfudb.format.Types.LogHeader in project CorfuDB by CorfuDB.

From the class StreamLogFiles, the method getCompactedEntries:

private CompactedEntry getCompactedEntries(String filePath, Set<Long> pendingTrim) throws IOException {
    FileChannel fc = getChannel(filePath, true);
    // Skip the header
    ByteBuffer headerMetadataBuf = ByteBuffer.allocate(METADATA_SIZE);
    fc.read(headerMetadataBuf);
    headerMetadataBuf.flip();
    Metadata headerMetadata = Metadata.parseFrom(headerMetadataBuf.array());
    ByteBuffer headerBuf = ByteBuffer.allocate(headerMetadata.getLength());
    fc.read(headerBuf);
    headerBuf.flip();
    LogHeader header = LogHeader.parseFrom(headerBuf.array());
    ByteBuffer o = ByteBuffer.allocate((int) fc.size() - (int) fc.position());
    fc.read(o);
    fc.close();
    o.flip();
    LinkedHashMap<Long, LogEntry> compacted = new LinkedHashMap<>();
    while (o.hasRemaining()) {
        //Skip delimiter
        o.getShort();
        byte[] metadataBuf = new byte[METADATA_SIZE];
        o.get(metadataBuf);
        try {
            Metadata metadata = Metadata.parseFrom(metadataBuf);
            byte[] logEntryBuf = new byte[metadata.getLength()];
            o.get(logEntryBuf);
            LogEntry entry = LogEntry.parseFrom(logEntryBuf);
            if (!noVerify) {
                if (metadata.getChecksum() != getChecksum(entry.toByteArray())) {
                    log.error("Checksum mismatch detected while trying to read address {}", entry.getGlobalAddress());
                    throw new DataCorruptionException();
                }
            }
            if (!pendingTrim.contains(entry.getGlobalAddress())) {
                compacted.put(entry.getGlobalAddress(), entry);
            }
        } catch (InvalidProtocolBufferException e) {
            throw new DataCorruptionException();
        }
    }
    return new CompactedEntry(header, compacted.values());
}
Also used : FileChannel(java.nio.channels.FileChannel) IMetadata(org.corfudb.protocols.wireprotocol.IMetadata) Metadata(org.corfudb.format.Types.Metadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) DataCorruptionException(org.corfudb.runtime.exceptions.DataCorruptionException) ByteBuffer(java.nio.ByteBuffer) LogHeader(org.corfudb.format.Types.LogHeader) LogEntry(org.corfudb.format.Types.LogEntry) LinkedHashMap(java.util.LinkedHashMap)
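
CompactedEntry itself is not shown in these examples. A minimal sketch of the holder implied by its use in Examples 1 and 5 (a constructor taking the header and the surviving entries, exposed via getLogHeader and getEntries); the real class may differ:

private static class CompactedEntry {
    private final LogHeader logHeader;
    private final Collection<LogEntry> entries;

    CompactedEntry(LogHeader logHeader, Collection<LogEntry> entries) {
        this.logHeader = logHeader;
        this.entries = entries;
    }

    LogHeader getLogHeader() {
        return logHeader;
    }

    Collection<LogEntry> getEntries() {
        return entries;
    }
}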

Aggregations

ByteBuffer (java.nio.ByteBuffer): 5
LogHeader (org.corfudb.format.Types.LogHeader): 5
FileChannel (java.nio.channels.FileChannel): 3
Metadata (org.corfudb.format.Types.Metadata): 3
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2
LogEntry (org.corfudb.format.Types.LogEntry): 2
IMetadata (org.corfudb.protocols.wireprotocol.IMetadata): 2
DataCorruptionException (org.corfudb.runtime.exceptions.DataCorruptionException): 2
ByteString (com.google.protobuf.ByteString): 1
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 1
File (java.io.File): 1
FileInputStream (java.io.FileInputStream): 1
IOException (java.io.IOException): 1
OutputStream (java.io.OutputStream): 1
LinkedHashMap (java.util.LinkedHashMap): 1
TrimEntry (org.corfudb.format.Types.TrimEntry): 1