Use of org.corfudb.format.Types.LogEntry in project CorfuDB by CorfuDB.
Class StreamLogFiles, method trimLogFile.
private void trimLogFile(String filePath, Set<Long> pendingTrim) throws IOException {
    FileChannel fc = FileChannel.open(FileSystems.getDefault().getPath(filePath + ".copy"),
            EnumSet.of(StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE, StandardOpenOption.SPARSE));
    FileChannel fc2 = FileChannel.open(FileSystems.getDefault().getPath(getTrimmedFilePath(filePath)),
            EnumSet.of(StandardOpenOption.APPEND));

    CompactedEntry log = getCompactedEntries(filePath, pendingTrim);
    LogHeader header = log.getLogHeader();
    Collection<LogEntry> compacted = log.getEntries();

    writeHeader(fc, header.getVersion(), header.getVerifyChecksum());

    for (LogEntry entry : compacted) {
        ByteBuffer record = getByteBufferWithMetaData(entry);
        // Each record is preceded by a two-byte delimiter
        ByteBuffer recordBuf = ByteBuffer.allocate(Short.BYTES + record.capacity());
        recordBuf.putShort(RECORD_DELIMITER);
        recordBuf.put(record.array());
        recordBuf.flip();
        fc.write(recordBuf);
    }

    fc.force(true);
    fc.close();

    try (OutputStream outputStream = Channels.newOutputStream(fc2)) {
        // Todo(Maithem) How do we verify that the compacted file is correct?
        for (Long address : pendingTrim) {
            TrimEntry entry = TrimEntry.newBuilder()
                    .setChecksum(getChecksum(address))
                    .setAddress(address)
                    .build();
            entry.writeDelimitedTo(outputStream);
        }
        outputStream.flush();
        fc2.force(true);
    }
    fc2.close();

    Files.move(Paths.get(filePath + ".copy"), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE);

    // Force the reload of the new segment
    writeChannels.remove(filePath);
}
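The method writes the compacted copy to a sibling ".copy" file, forces it to disk, and only then swaps it over the original with an atomic rename, so a crash mid-compaction never leaves a half-written segment visible. Below is a minimal, self-contained sketch of that same write-force-rename pattern in plain NIO; the names AtomicRewrite and rewrite are illustrative, not CorfuDB APIs.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public class AtomicRewrite {
    // Rewrites a file safely: write the new contents to a ".copy" sibling,
    // force it to disk, then atomically rename it over the original.
    static void rewrite(Path target, byte[] newContents) throws IOException {
        Path copy = target.resolveSibling(target.getFileName() + ".copy");
        try (FileChannel fc = FileChannel.open(copy,
                StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING)) {
            fc.write(ByteBuffer.wrap(newContents));
            fc.force(true); // flush data and metadata before the rename
        }
        // ATOMIC_MOVE guarantees readers see either the old or the new file,
        // never a partially written one.
        Files.move(copy, target, StandardCopyOption.ATOMIC_MOVE);
    }
}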
Use of org.corfudb.format.Types.LogEntry in project CorfuDB by CorfuDB.
Class StreamLogFiles, method initializeMaxGlobalAddress.
private void initializeMaxGlobalAddress() {
    long tailSegment = serverContext.getTailSegment();
    long addressInTailSegment = (tailSegment * RECORDS_PER_LOG_FILE) + 1;
    SegmentHandle sh = getSegmentHandleForAddress(addressInTailSegment);
    try {
        Collection<LogEntry> segmentEntries = (Collection<LogEntry>)
                getCompactedEntries(sh.getFileName(), new HashSet<>()).getEntries();
        for (LogEntry entry : segmentEntries) {
            long currentAddress = entry.getGlobalAddress();
            globalTail.getAndUpdate(maxTail -> currentAddress > maxTail ? currentAddress : maxTail);
        }
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
    lastSegment = tailSegment;
}
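The address arithmetic above maps a segment number to an address inside it (tailSegment * RECORDS_PER_LOG_FILE + 1), and globalTail.getAndUpdate keeps the tail monotonically increasing while the segment's entries are replayed. A hedged, self-contained sketch of both pieces; TailTracker, observe, and the RECORDS_PER_LOG_FILE value are illustrative assumptions:

import java.util.concurrent.atomic.AtomicLong;

public class TailTracker {
    // Illustrative value; CorfuDB's actual RECORDS_PER_LOG_FILE may differ.
    static final long RECORDS_PER_LOG_FILE = 10_000;

    final AtomicLong globalTail = new AtomicLong(-1);

    // Segment that holds a given global address (inverse of the
    // tailSegment * RECORDS_PER_LOG_FILE + 1 computation above).
    static long segmentForAddress(long address) {
        return address / RECORDS_PER_LOG_FILE;
    }

    // Monotonically raise the tail, mirroring globalTail.getAndUpdate(...) above.
    void observe(long address) {
        globalTail.getAndUpdate(maxTail -> Math.max(maxTail, address));
    }

    public static void main(String[] args) {
        TailTracker t = new TailTracker();
        long tailSegment = 3;
        t.observe(tailSegment * RECORDS_PER_LOG_FILE + 1);
        t.observe(7); // a lower address never moves the tail backwards
        System.out.println(segmentForAddress(t.globalTail.get())); // 3
    }
}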
Use of org.corfudb.format.Types.LogEntry in project CorfuDB by CorfuDB.
Class StreamLogFiles, method readAddressSpace.
/**
 * Reads an address space from a log file into a SegmentHandle.
 *
 * @param sh the segment handle whose backing file is scanned
 */
private void readAddressSpace(SegmentHandle sh) throws IOException {
    long logFileSize;
    try (MultiReadWriteLock.AutoCloseableLock ignored = segmentLocks.acquireReadLock(sh.getSegment())) {
        logFileSize = sh.logChannel.size();
    }

    FileChannel fc = getChannel(sh.fileName, true);
    if (fc == null) {
        log.trace("Can't read address space, {} doesn't exist", sh.fileName);
        return;
    }

    // Skip the header
    ByteBuffer headerMetadataBuf = ByteBuffer.allocate(METADATA_SIZE);
    fc.read(headerMetadataBuf);
    headerMetadataBuf.flip();
    Metadata headerMetadata = Metadata.parseFrom(headerMetadataBuf.array());
    fc.position(fc.position() + headerMetadata.getLength());

    long channelOffset = fc.position();
    ByteBuffer o = ByteBuffer.allocate((int) logFileSize - (int) fc.position());
    fc.read(o);
    fc.close();
    o.flip();

    while (o.hasRemaining()) {
        short magic = o.getShort();
        channelOffset += Short.BYTES;
        if (magic != RECORD_DELIMITER) {
            log.error("Expected a delimiter but found something else while trying to read file {}", sh.fileName);
            throw new DataCorruptionException();
        }

        byte[] metadataBuf = new byte[METADATA_SIZE];
        o.get(metadataBuf);
        channelOffset += METADATA_SIZE;

        try {
            Metadata metadata = Metadata.parseFrom(metadataBuf);
            byte[] logEntryBuf = new byte[metadata.getLength()];
            o.get(logEntryBuf);
            LogEntry entry = LogEntry.parseFrom(logEntryBuf);

            if (!noVerify && metadata.getChecksum() != getChecksum(entry.toByteArray())) {
                log.error("Checksum mismatch detected while trying to read file {}", sh.fileName);
                throw new DataCorruptionException();
            }

            sh.knownAddresses.put(entry.getGlobalAddress(),
                    new AddressMetaData(metadata.getChecksum(), metadata.getLength(), channelOffset));
            channelOffset += metadata.getLength();
        } catch (InvalidProtocolBufferException e) {
            throw new DataCorruptionException();
        }
    }
}
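Note that the offset stored in AddressMetaData points past the delimiter and metadata, directly at the serialized LogEntry, so a later read can seek straight to the payload without rescanning the segment. A minimal sketch of such a random read, assuming hypothetical names (RandomRead, readEntryAt); the returned bytes would be fed to LogEntry.parseFrom:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class RandomRead {
    // Given the payload offset and length recorded in AddressMetaData,
    // fetch a single entry's bytes directly.
    static byte[] readEntryAt(Path segmentFile, long payloadOffset, int length) throws IOException {
        try (FileChannel fc = FileChannel.open(segmentFile, StandardOpenOption.READ)) {
            fc.position(payloadOffset); // stored offset lands on the payload itself
            ByteBuffer buf = ByteBuffer.allocate(length);
            while (buf.hasRemaining() && fc.read(buf) > 0) {
                // keep reading until the full payload is in the buffer
            }
            return buf.array();
        }
    }
}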
Use of org.corfudb.format.Types.LogEntry in project CorfuDB by CorfuDB.
Class logReader, method processRecord.
final LogEntryExtended processRecord() throws IOException {
    // Read the two-byte record delimiter
    ByteBuffer commaBuffer = ByteBuffer.allocate(2);
    int bytesRead = fileChannelIn.read(commaBuffer);
    commaBuffer.flip();
    short delim = commaBuffer.getShort();
    if (delim != StreamLogFiles.RECORD_DELIMITER) {
        System.out.println("Incorrect delimiter");
    }

    // Read the fixed-size metadata block
    ByteBuffer mdBuffer = ByteBuffer.allocate(metadataSize);
    bytesRead += fileChannelIn.read(mdBuffer);
    mdBuffer.flip();
    Metadata md = Metadata.parseFrom(mdBuffer.array());

    // Read the record body and verify its checksum against the metadata
    ByteBuffer recordBuffer = ByteBuffer.allocate(md.getLength());
    bytesRead += fileChannelIn.read(recordBuffer);
    recordBuffer.flip();
    int cksum = StreamLogFiles.getChecksum(recordBuffer.array());
    if (cksum != md.getChecksum()) {
        System.out.println("Checksum ERROR");
    }

    LogEntry leNew = processRecordBody(recordBuffer);
    return new LogEntryExtended(leNew, bytesRead, cksum);
}
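processRecord consumes exactly one framed record and reports how many bytes it read, which makes it easy to drive in a loop until the channel is exhausted. A hedged sketch of such a driver, assuming it lives inside logReader next to fileChannelIn (the only members it references); scanAll is a hypothetical name, not part of CorfuDB:

// Hypothetical driver method inside logReader.
void scanAll() throws IOException {
    // FileChannel.position()/size() bound the loop at end of file.
    while (fileChannelIn.position() < fileChannelIn.size()) {
        LogEntryExtended rec = processRecord();
        // Inspect rec here, e.g. tally records or print global addresses.
    }
}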
Use of org.corfudb.format.Types.LogEntry in project CorfuDB by CorfuDB.
Class StreamLogFiles, method writeRecord.
/**
 * Write a log entry record to a file.
 *
 * @param fh      The file handle to use.
 * @param address The address of the entry.
 * @param entry   The LogData to append.
 * @return metadata for the written record
 */
private AddressMetaData writeRecord(SegmentHandle fh, long address, LogData entry) throws IOException {
    LogEntry logEntry = getLogEntry(address, entry);
    Metadata metadata = getMetadata(logEntry);
    ByteBuffer record = getByteBuffer(metadata, logEntry);

    // The record is preceded by a two-byte delimiter
    ByteBuffer recordBuf = ByteBuffer.allocate(Short.BYTES + record.capacity());
    recordBuf.putShort(RECORD_DELIMITER);
    recordBuf.put(record.array());
    recordBuf.flip();

    long channelOffset;
    try (MultiReadWriteLock.AutoCloseableLock ignored = segmentLocks.acquireWriteLock(fh.getSegment())) {
        // The stored offset skips the delimiter and metadata, landing on the entry payload
        channelOffset = fh.logChannel.position() + Short.BYTES + METADATA_SIZE;
        fh.logChannel.write(recordBuf);
        channelsToSync.add(fh.logChannel);
        syncTailSegment(address);
    }
    return new AddressMetaData(metadata.getChecksum(), metadata.getLength(), channelOffset);
}
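Taken together with readAddressSpace, this fixes the on-disk framing of every record: a two-byte delimiter, a fixed-size protobuf Metadata block carrying the length and checksum, then the serialized LogEntry. The sketch below frames a plain byte payload the same way, with loudly flagged substitutions: plain ints stand in for the Metadata protobuf, CRC32 stands in for whatever hash getChecksum actually uses, and the delimiter value is made up.

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class RecordFraming {
    static final short RECORD_DELIMITER = 0x4C45; // illustrative value only

    // Frames a payload as: delimiter | checksum | length | payload.
    // The real code stores checksum/length in a protobuf Metadata block;
    // plain ints keep this sketch self-contained.
    static ByteBuffer frameRecord(byte[] payload) {
        CRC32 crc = new CRC32();
        crc.update(payload);
        ByteBuffer buf = ByteBuffer.allocate(Short.BYTES + Integer.BYTES * 2 + payload.length);
        buf.putShort(RECORD_DELIMITER);
        buf.putInt((int) crc.getValue());
        buf.putInt(payload.length);
        buf.put(payload);
        buf.flip();
        return buf;
    }
}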