Use of org.apache.lucene.store.BufferedChecksumIndexInput in project lucene-solr by apache.
The class ReplicaNode, method finishNRTCopy:
void finishNRTCopy(CopyJob job, long startNS) throws IOException {
  CopyState copyState = job.getCopyState();
  message("top: finishNRTCopy: version=" + copyState.version + (job.getFailed() ? " FAILED" : "") + " job=" + job);
  synchronized (this) {
    if ("syncing".equals(state)) {
      state = "idle";
    }
    if (curNRTCopy == job) {
      message("top: now clear curNRTCopy; job=" + job);
      curNRTCopy = null;
    } else {
      assert job.getFailed();
      message("top: skip clear curNRTCopy: we were cancelled; job=" + job);
    }
    if (job.getFailed()) {
      return;
    }
    // Does final file renames:
    job.finish();
    // Turn byte[] back to SegmentInfos:
    byte[] infosBytes = copyState.infosBytes;
    SegmentInfos infos = SegmentInfos.readCommit(dir, new BufferedChecksumIndexInput(new ByteArrayIndexInput("SegmentInfos", infosBytes)), copyState.gen);
    assert infos.getVersion() == copyState.version;
    message(" version=" + infos.getVersion() + " segments=" + infos.toString());
    // Cutover to new searcher:
    if (mgr != null) {
      ((SegmentInfosSearcherManager) mgr).setCurrentInfos(infos);
    }
    // Must first incRef new NRT files, then decRef old ones, to make sure we don't remove an NRT file that's in common to both:
    Collection<String> newFiles = copyState.files.keySet();
    message("top: incRef newNRTFiles=" + newFiles);
    deleter.incRef(newFiles);
    // If any of our new files were previously copied merges, we clear them now, so we don't try to later delete a non-existent file:
    pendingMergeFiles.removeAll(newFiles);
    message("top: after remove from pending merges pendingMergeFiles=" + pendingMergeFiles);
    message("top: decRef lastNRTFiles=" + lastNRTFiles);
    deleter.decRef(lastNRTFiles);
    lastNRTFiles.clear();
    lastNRTFiles.addAll(newFiles);
    message("top: set lastNRTFiles=" + lastNRTFiles);
    // Completed merge files whose segments are now folded into an NRT point are no longer pending;
    // stop tracking them and delete them if nothing else references them:
    if (copyState.completedMergeFiles.isEmpty() == false) {
      message("now remove-if-not-ref'd completed merge files: " + copyState.completedMergeFiles);
      for (String fileName : copyState.completedMergeFiles) {
        if (pendingMergeFiles.contains(fileName)) {
          pendingMergeFiles.remove(fileName);
          deleter.deleteIfNoRef(fileName);
        }
      }
    }
    lastFileMetaData = copyState.files;
  }
  int markerCount;
  IndexSearcher s = mgr.acquire();
  try {
    markerCount = s.count(new TermQuery(new Term("marker", "marker")));
  } finally {
    mgr.release(s);
  }
  message(String.format(Locale.ROOT, "top: done sync: took %.3fs for %s, opened NRT reader version=%d markerCount=%d", (System.nanoTime() - startNS) / 1000000000.0, bytesToString(job.getTotalBytesCopied()), copyState.version, markerCount));
}
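The BufferedChecksumIndexInput use worth noting here is the deserialization step: the primary ships SegmentInfos as a raw byte[] (copyState.infosBytes), and wrapping it in ByteArrayIndexInput plus BufferedChecksumIndexInput lets SegmentInfos.readCommit verify the embedded checksum while it reads. A minimal sketch of that step in isolation (the helper name readInfosFromBytes is illustrative, not part of the project):

import java.io.IOException;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.BufferedChecksumIndexInput;
import org.apache.lucene.store.ByteArrayIndexInput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;

// Illustrative helper, not from lucene-solr: deserialize a SegmentInfos that
// arrived over the wire as a byte[]; readCommit verifies the checksum as it reads.
static SegmentInfos readInfosFromBytes(Directory dir, byte[] infosBytes, long gen) throws IOException {
  ChecksumIndexInput in = new BufferedChecksumIndexInput(new ByteArrayIndexInput("SegmentInfos", infosBytes));
  return SegmentInfos.readCommit(dir, in, gen);
}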
Use of org.apache.lucene.store.BufferedChecksumIndexInput in project lucene-solr by apache.
The class SimpleTextFieldsReader, method readFields:
private TreeMap<String, Long> readFields(IndexInput in) throws IOException {
  ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
  BytesRefBuilder scratch = new BytesRefBuilder();
  TreeMap<String, Long> fields = new TreeMap<>();
  while (true) {
    SimpleTextUtil.readLine(input, scratch);
    if (scratch.get().equals(END)) {
      SimpleTextUtil.checkFooter(input);
      return fields;
    } else if (StringHelper.startsWith(scratch.get(), FIELD)) {
      String fieldName = new String(scratch.bytes(), FIELD.length, scratch.length() - FIELD.length, StandardCharsets.UTF_8);
      fields.put(fieldName, input.getFilePointer());
    }
  }
}
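The map built by readFields associates each field name with the file position just past its FIELD line; the reader later seeks the underlying input back to that position to enumerate the field's terms. A hedged sketch of that later lookup, with fields and in standing in for the reader's state:

// Illustrative only: consuming the offsets readFields collected. 'fields' is the
// TreeMap returned above; 'in' is the same underlying IndexInput (or a clone).
Long termsStart = fields.get("body"); // position just past the "field body" line
if (termsStart != null) {
  in.seek(termsStart); // jump directly to this field's term data
  // ... parse term lines from here ...
}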
Use of org.apache.lucene.store.BufferedChecksumIndexInput in project lucene-solr by apache.
The class SimpleTextStoredFieldsReader, method readIndex:
// we don't actually write a .fdx-like index, instead we read the
// stored fields file in entirety up-front and save the offsets
// so we can seek to the documents later.
private void readIndex(int size) throws IOException {
  ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
  offsets = new long[size];
  int upto = 0;
  while (!scratch.get().equals(END)) {
    SimpleTextUtil.readLine(input, scratch);
    if (StringHelper.startsWith(scratch.get(), DOC)) {
      offsets[upto] = input.getFilePointer();
      upto++;
    }
  }
  SimpleTextUtil.checkFooter(input);
  assert upto == offsets.length;
}
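Once readIndex has filled offsets, random access to a stored document is a single seek: offsets[docID] points just past that document's DOC line. A sketch of the later per-document lookup (the method name is illustrative, not the reader's actual API):

// Illustrative sketch: how the saved offsets are used for per-document access.
void readDocumentSketch(int docID) throws IOException {
  in.seek(offsets[docID]); // position just after this document's "doc" line
  // ... read this document's field lines until the next "doc" or END ...
}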
Use of org.apache.lucene.store.BufferedChecksumIndexInput in project lucene-solr by apache.
The class SimpleTextTermVectorsReader, method readIndex:
// we don't actually write a .tvx-like index, instead we read the
// vectors file in entirety up-front and save the offsets
// so we can seek to the data later.
private void readIndex(int maxDoc) throws IOException {
  ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
  offsets = new long[maxDoc];
  int upto = 0;
  while (!scratch.get().equals(END)) {
    SimpleTextUtil.readLine(input, scratch);
    if (StringHelper.startsWith(scratch.get(), DOC)) {
      offsets[upto] = input.getFilePointer();
      upto++;
    }
  }
  SimpleTextUtil.checkFooter(input);
  assert upto == offsets.length;
}
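This method is structurally identical to the stored-fields version above; the reason both scan the entire file through BufferedChecksumIndexInput is that the footer check compares the CRC accumulated during the scan against a trailing checksum line. A conceptual paraphrase of a SimpleText-style footer check (not the project's SimpleTextUtil; parsing and names are simplified for illustration):

// Conceptual sketch: the accumulated CRC must be captured BEFORE the checksum
// line itself is read, since reading it would also feed the digest.
static void checkFooterSketch(ChecksumIndexInput input, String checksumLine) throws IOException {
  String expected = String.format(Locale.ROOT, "%020d", input.getChecksum());
  String actual = checksumLine.substring("checksum ".length());
  if (!expected.equals(actual)) {
    throw new CorruptIndexException("checksum mismatch: expected=" + expected + " vs actual=" + actual, input);
  }
}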
Use of org.apache.lucene.store.BufferedChecksumIndexInput in project lucene-solr by apache.
The class TestCodecUtil, method testCheckFooterValid:
public void testCheckFooterValid() throws Exception {
  RAMFile file = new RAMFile();
  IndexOutput output = new RAMOutputStream(file, true);
  CodecUtil.writeHeader(output, "FooBar", 5);
  output.writeString("this is the data");
  CodecUtil.writeFooter(output);
  output.close();
  ChecksumIndexInput input = new BufferedChecksumIndexInput(new RAMInputStream("file", file));
  Exception mine = new RuntimeException("fake exception");
  RuntimeException expected = expectThrows(RuntimeException.class, () -> {
    CodecUtil.checkFooter(input, mine);
  });
  assertEquals("fake exception", expected.getMessage());
  Throwable[] suppressed = expected.getSuppressed();
  assertEquals(1, suppressed.length);
  assertTrue(suppressed[0].getMessage().contains("checksum passed"));
  input.close();
}
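For contrast with the exception-wrapping path the test exercises, here is a happy-path sketch in the same style: write header, payload, and footer, then read everything back through BufferedChecksumIndexInput and let checkFooter verify the CRC32 directly (assumes the same test utilities and imports as above):

// Minimal sketch: round-trip a file and verify its footer on the read side.
RAMFile file = new RAMFile();
IndexOutput output = new RAMOutputStream(file, true);
CodecUtil.writeHeader(output, "FooBar", 5);
output.writeString("this is the data");
CodecUtil.writeFooter(output);
output.close();
ChecksumIndexInput input = new BufferedChecksumIndexInput(new RAMInputStream("file", file));
CodecUtil.checkHeader(input, "FooBar", 5, 5); // codec name and version must match
assertEquals("this is the data", input.readString());
CodecUtil.checkFooter(input); // throws CorruptIndexException if the CRC32 doesn't match
input.close();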