Usage example of org.neo4j.causalclustering.core.consensus.log.EntryRecord in the neo4j project:
the dump method of the class DumpSegmentedRaftLog.
/**
 * Recovers the segmented RAFT log found at the given path and prints every
 * segment header and entry record to the supplied stream.
 *
 * @param filenameOrDirectory path of the directory holding the segment files
 * @param out                 destination for the human-readable dump
 * @return the number of segment files visited
 * @throws IOException                if the recovery scan fails
 * @throws DamagedLogStorageException if the log storage is corrupt
 * @throws DisposedException          if the log resources were already disposed
 */
private int dump(String filenameOrDirectory, PrintStream out) throws IOException, DamagedLogStorageException, DisposedException {
    LogProvider logProvider = NullLogProvider.getInstance();
    // Mutable counter in a one-element array so the visitor lambda can update it.
    final int[] logsFound = { 0 };
    FileNames fileNames = new FileNames(new File(filenameOrDirectory));
    ReaderPool readerPool = new ReaderPool(0, logProvider, fileNames, fileSystem, Clocks.systemClock());
    RecoveryProtocol recoveryProtocol = new RecoveryProtocol(fileSystem, fileNames, readerPool, marshal, logProvider);
    Segments segments = recoveryProtocol.run().segments;
    segments.visit((segment) -> {
        logsFound[0]++;
        out.println("=== " + segment.getFilename() + " ===");
        SegmentHeader header = segment.header();
        out.println(header.toString());
        // Entries in a segment start right after the header's previous index.
        try (IOCursor<EntryRecord> cursor = segment.getCursor(header.prevIndex() + 1)) {
            while (cursor.next()) {
                out.println(cursor.get().toString());
            }
        } catch (DisposedException | IOException e) {
            // Dump tool: abort the whole process on any read failure.
            // NOTE(review): printStackTrace + System.exit is acceptable only in a CLI tool like this.
            e.printStackTrace();
            System.exit(-1);
            return true; // stop visiting (unreachable after exit, kept for the visitor contract)
        }
        return false; // keep visiting
    });
    return logsFound[0];
}
Usage example of org.neo4j.causalclustering.core.consensus.log.EntryRecord in the neo4j project:
the next method of the class EntryRecordCursor.
/**
 * Advances the cursor to the next entry record.
 *
 * @return {@code true} when a record was read, {@code false} at end of stream
 * @throws IOException if reading the underlying channel fails
 */
@Override
public boolean next() throws IOException {
    EntryRecord nextEntry;
    try {
        nextEntry = read(bufferedReader, contentMarshal);
    } catch (EndOfStreamException e) {
        // Clean end of the segment: invalidate the current record and report exhaustion.
        currentRecord.invalidate();
        return false;
    } catch (IOException e) {
        // Flag the failure before propagating — presumably so the reader is not reused; verify against SegmentFile.
        hadError = true;
        throw e;
    }
    // Successful read: publish the record and move the logical position forward.
    currentRecord.set(nextEntry);
    position.byteOffset = bufferedReader.position();
    position.logIndex++;
    return true;
}
Usage example of org.neo4j.causalclustering.core.consensus.log.EntryRecord in the neo4j project:
the shouldNotReturnReaderExperiencingErrorToPool test of the class SegmentFileTest.
@Test
public void shouldNotReturnReaderExperiencingErrorToPool() throws Exception {
    // given: a pooled reader whose channel fails on every read
    ReaderPool pool = mock(ReaderPool.class);
    Reader failingReader = mock(Reader.class);
    StoreChannel failingChannel = mock(StoreChannel.class);
    when(failingChannel.read(any(ByteBuffer.class))).thenThrow(new IOException());
    when(failingReader.channel()).thenReturn(failingChannel);
    when(pool.acquire(anyLong(), anyLong())).thenReturn(failingReader);
    try (SegmentFile segment = create(fsRule.get(), fileNames.getForVersion(0), pool, 0, contentMarshal, logProvider, segmentHeader)) {
        // given
        IOCursor<EntryRecord> cursor = segment.getCursor(0);
        try {
            cursor.next();
            fail();
        } catch (IOException e) {
            // the stubbed channel is expected to throw
        }
        // when
        cursor.close();
        // then: the erroring reader is closed, never handed back to the pool
        verify(pool, never()).release(failingReader);
        verify(failingReader).close();
    }
}
Usage example of org.neo4j.causalclustering.core.consensus.log.EntryRecord in the neo4j project:
the run method of the class RecoveryProtocol.
// Scans all segment files on disk, validates their headers, and rebuilds the
// in-memory log state (segments, term tracking, append/prev indexes).
// Statement order is significant throughout — do not reorder without care.
State run() throws IOException, DamagedLogStorageException, DisposedException {
State state = new State();
SortedMap<Long, File> files = fileNames.getAllFiles(fileSystem, log);
// No files at all: bootstrap a fresh, empty log at version -1.
if (files.entrySet().isEmpty()) {
state.segments = new Segments(fileSystem, fileNames, readerPool, emptyList(), contentMarshal, logProvider, -1);
state.segments.rotate(-1, -1, -1);
state.terms = new Terms(-1, -1);
return state;
}
List<SegmentFile> segmentFiles = new ArrayList<>();
SegmentFile segment = null;
// File versions must form a contiguous sequence starting at the lowest on disk.
long expectedVersion = files.firstKey();
boolean mustRecoverLastHeader = false;
// the first file is treated the same as a skip
boolean skip = true;
for (Map.Entry<Long, File> entry : files.entrySet()) {
long fileNameVersion = entry.getKey();
File file = entry.getValue();
SegmentHeader header;
checkVersionSequence(fileNameVersion, expectedVersion);
try {
header = loadHeader(fileSystem, file);
// The version recorded inside the header must agree with the filename.
checkVersionMatches(header.version(), fileNameVersion);
} catch (EndOfStreamException e) {
// A truncated/missing header is only tolerable on the very last file,
// and only if there is an earlier file to recover it from.
if (files.lastKey() != fileNameVersion) {
throw new DamagedLogStorageException(e, "Intermediate file with incomplete or no header found: %s", file);
} else if (files.size() == 1) {
throw new DamagedLogStorageException(e, "Single file with incomplete or no header found: %s", file);
}
/* Last file header must be recovered by scanning next-to-last file and writing a new header based on that. */
mustRecoverLastHeader = true;
break;
}
segment = new SegmentFile(fileSystem, file, readerPool, fileNameVersion, contentMarshal, logProvider, header);
segmentFiles.add(segment);
// A gap between a segment's prevIndex and the previous file's last index
// indicates the log skipped entries; restart prev-tracking from this file.
if (segment.header().prevIndex() != segment.header().prevFileLastIndex()) {
log.info(format("Skipping from index %d to %d.", segment.header().prevFileLastIndex(), segment.header().prevIndex() + 1));
skip = true;
}
// Record prevIndex/prevTerm from the first file after a skip (or the first file overall).
if (skip) {
state.prevIndex = segment.header().prevIndex();
state.prevTerm = segment.header().prevTerm();
skip = false;
}
expectedVersion++;
}
// At least one segment was constructed: an empty file set returned early, and a
// broken header on a sole file threw above.
assert segment != null;
// Replay the last intact segment to find the true append index and term history.
state.appendIndex = segment.header().prevIndex();
state.terms = new Terms(segment.header().prevIndex(), segment.header().prevTerm());
try (IOCursor<EntryRecord> cursor = segment.getCursor(segment.header().prevIndex() + 1)) {
while (cursor.next()) {
EntryRecord entry = cursor.get();
state.appendIndex = entry.logIndex();
state.terms.append(state.appendIndex, entry.logEntry().term());
}
}
// Rewrite the header of the last (header-damaged) file from the replayed state.
if (mustRecoverLastHeader) {
SegmentHeader header = new SegmentHeader(state.appendIndex, expectedVersion, state.appendIndex, state.terms.latest());
log.warn("Recovering last file based on next-to-last file. " + header);
File file = fileNames.getForVersion(expectedVersion);
writeHeader(fileSystem, file, header);
segment = new SegmentFile(fileSystem, file, readerPool, expectedVersion, contentMarshal, logProvider, header);
segmentFiles.add(segment);
}
state.segments = new Segments(fileSystem, fileNames, readerPool, segmentFiles, contentMarshal, logProvider, segment.header().version());
return state;
}
Aggregations