Use of org.apache.lucene.util.InfoStream in project lucene-solr by apache.
From class CloseIndexTask, method doLogic().
@Override
public int doLogic() throws IOException {
  IndexWriter iw = getRunData().getIndexWriter();
  if (iw != null) {
    // If infoStream was set to output to a file, close it.
    InfoStream infoStream = iw.getConfig().getInfoStream();
    if (infoStream != null) {
      infoStream.close();
    }
    if (doWait == false) {
      iw.commit();
      iw.rollback();
    } else {
      iw.close();
    }
    getRunData().setIndexWriter(null);
  }
  return 1;
}
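The InfoStream closed above is whatever the benchmark attached to the writer, typically one backed by a log file. A minimal sketch of wiring a file-backed InfoStream into an IndexWriterConfig (the verboseConfig method name, the debug.log file name, and the analyzer choice are placeholders, not part of the task above):

import java.io.PrintStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.util.PrintStreamInfoStream;

static IndexWriterConfig verboseConfig() throws Exception {
  IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
  // IndexWriter never closes the InfoStream handed to it, which is why
  // CloseIndexTask above closes it explicitly before discarding the writer.
  iwc.setInfoStream(new PrintStreamInfoStream(new PrintStream("debug.log", "UTF-8")));
  return iwc;
}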
Use of org.apache.lucene.util.InfoStream in project lucene-solr by apache.
From class TestInfoStream, method testTestPointsOn().
/** but they should work when we need them */
public void testTestPointsOn() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(null);
  AtomicBoolean seenTestPoint = new AtomicBoolean();
  iwc.setInfoStream(new InfoStream() {

    @Override
    public void close() throws IOException {
    }

    @Override
    public void message(String component, String message) {
      if ("TP".equals(component)) {
        seenTestPoint.set(true);
      }
    }

    @Override
    public boolean isEnabled(String component) {
      return true;
    }
  });
  IndexWriter iw = new IndexWriter(dir, iwc);
  iw.enableTestPoints = true;
  iw.addDocument(new Document());
  iw.close();
  dir.close();
  assertTrue(seenTestPoint.get());
}
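The anonymous subclass above is the general extension pattern: InfoStream has exactly three methods to implement. Outside of tests, the same shape is handy for filtering noisy components; a sketch (the class name, the component whitelist, and the stdout target are illustrative choices, not part of the test):

import java.io.IOException;
import java.util.Set;
import org.apache.lucene.util.InfoStream;

final class ComponentFilterInfoStream extends InfoStream {
  private final Set<String> enabled;

  ComponentFilterInfoStream(Set<String> enabled) {
    this.enabled = enabled;
  }

  @Override
  public void message(String component, String message) {
    // Callers are expected to check isEnabled(component) before calling this.
    System.out.println(component + ": " + message);
  }

  @Override
  public boolean isEnabled(String component) {
    return enabled.contains(component);
  }

  @Override
  public void close() throws IOException {
  }
}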
Use of org.apache.lucene.util.InfoStream in project lucene-solr by apache.
From class TestIndexWriterReader, method testSimpleMergedSegmentWarmer().
public void testSimpleMergedSegmentWarmer() throws Exception {
  Directory dir = newDirectory();
  final AtomicBoolean didWarm = new AtomicBoolean();
  InfoStream infoStream = new InfoStream() {

    @Override
    public void close() throws IOException {
    }

    @Override
    public void message(String component, String message) {
      if ("SMSW".equals(component)) {
        didWarm.set(true);
      }
    }

    @Override
    public boolean isEnabled(String component) {
      return true;
    }
  };
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2)
      .setReaderPooling(true)
      .setInfoStream(infoStream)
      .setMergedSegmentWarmer(new SimpleMergedSegmentWarmer(infoStream))
      .setMergePolicy(newLogMergePolicy(10)));
  Document doc = new Document();
  doc.add(newStringField("foo", "bar", Field.Store.NO));
  for (int i = 0; i < 20; i++) {
    w.addDocument(doc);
  }
  w.waitForMerges();
  w.close();
  dir.close();
  assertTrue(didWarm.get());
}
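SimpleMergedSegmentWarmer reports through the writer's InfoStream under the "SMSW" component, which is the hook this test relies on. A custom warmer drops in the same way; a sketch in the same test context, assuming the Lucene 7.x IndexWriter.IndexReaderWarmer abstract-class shape (the checkIntegrity() call merely stands in for whatever warm-up work an application actually needs):

IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
    .setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
      @Override
      public void warm(LeafReader reader) throws IOException {
        // Invoked on each newly merged segment before it is made visible,
        // so the first search against it does not pay the cold-start cost.
        reader.checkIntegrity();
      }
    });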
Use of org.apache.lucene.util.InfoStream in project lucene-solr by apache.
From class IndexWriter, method mergeMiddle().
/** Does the actual (time-consuming) work of the merge,
 *  but without holding synchronized lock on IndexWriter
 *  instance */
private int mergeMiddle(MergePolicy.OneMerge merge, MergePolicy mergePolicy) throws IOException {
  merge.checkAborted();
  Directory mergeDirectory = config.getMergeScheduler().wrapForMerge(merge, directory);
  List<SegmentCommitInfo> sourceSegments = merge.segments;
  IOContext context = new IOContext(merge.getStoreMergeInfo());
  final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(mergeDirectory);
  if (infoStream.isEnabled("IW")) {
    infoStream.message("IW", "merging " + segString(merge.segments));
  }
  merge.readers = new ArrayList<>(sourceSegments.size());
  // This is try/finally to make sure merger's readers are
  // closed:
  boolean success = false;
  try {
    int segUpto = 0;
    while (segUpto < sourceSegments.size()) {
      final SegmentCommitInfo info = sourceSegments.get(segUpto);
      // Hold onto the "live" reader; we will use this to
      // commit merged deletes
      final ReadersAndUpdates rld = readerPool.get(info, true);
      // Carefully pull the most recent live docs and reader
      SegmentReader reader;
      final Bits liveDocs;
      final int delCount;
      synchronized (this) {
        // Must sync to ensure BufferedDeletesStream cannot change liveDocs,
        // pendingDeleteCount and field updates while we pull a copy:
        reader = rld.getReaderForMerge(context);
        liveDocs = rld.getReadOnlyLiveDocs();
        delCount = rld.getPendingDeleteCount() + info.getDelCount();
        assert reader != null;
        assert rld.verifyDocCounts();
        if (infoStream.isEnabled("IW")) {
          if (rld.getPendingDeleteCount() != 0) {
            infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount());
          } else if (info.getDelCount() != 0) {
            infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount());
          } else {
            infoStream.message("IW", "seg=" + segString(info) + " no deletes");
          }
        }
      }
      // make a new reader with updated live docs and del count.
      if (reader.numDeletedDocs() != delCount) {
        // beware of zombies
        assert delCount > reader.numDeletedDocs();
        SegmentReader newReader;
        synchronized (this) {
          // We must also sync on IW here, because another thread could be writing
          // new DV updates / remove old gen field infos files causing FNFE:
          newReader = new SegmentReader(info, reader, liveDocs, info.info.maxDoc() - delCount);
        }
        boolean released = false;
        try {
          rld.release(reader);
          released = true;
        } finally {
          if (!released) {
            newReader.decRef();
          }
        }
        reader = newReader;
      }
      merge.readers.add(reader);
      assert delCount <= info.info.maxDoc() : "delCount=" + delCount + " info.maxDoc=" + info.info.maxDoc() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
      segUpto++;
    }
    // System.out.println("[" + Thread.currentThread().getName() + "] IW.mergeMiddle: merging " + merge.getMergeReaders());
    // Let the merge wrap readers
    List<CodecReader> mergeReaders = new ArrayList<>();
    for (SegmentReader reader : merge.readers) {
      CodecReader wrappedReader = merge.wrapForMerge(reader);
      validateMergeReader(wrappedReader);
      mergeReaders.add(wrappedReader);
    }
    final SegmentMerger merger = new SegmentMerger(mergeReaders, merge.info.info, infoStream, dirWrapper, globalFieldNumberMap, context);
    merge.checkAborted();
    merge.mergeStartNS = System.nanoTime();
    // This is where all the work happens:
    if (merger.shouldMerge()) {
      merger.merge();
    }
    MergeState mergeState = merger.mergeState;
    assert mergeState.segmentInfo == merge.info.info;
    merge.info.info.setFiles(new HashSet<>(dirWrapper.getCreatedFiles()));
    if (infoStream.isEnabled("IW")) {
      if (merger.shouldMerge()) {
        String pauseInfo = merge.getMergeProgress().getPauseTimes().entrySet().stream()
            .filter((e) -> e.getValue() > 0)
            .map((e) -> String.format(Locale.ROOT, "%.1f sec %s", e.getValue() / 1000000000., e.getKey().name().toLowerCase(Locale.ROOT)))
            .collect(Collectors.joining(", "));
        if (!pauseInfo.isEmpty()) {
          pauseInfo = " (" + pauseInfo + ")";
        }
        long t1 = System.nanoTime();
        double sec = (t1 - merge.mergeStartNS) / 1000000000.;
        double segmentMB = (merge.info.sizeInBytes() / 1024. / 1024.);
infoStream.message("IW", "merge codec=" + codec + " maxDoc=" + merge.info.info.maxDoc() + "; merged segment has " + (mergeState.mergeFieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " + (mergeState.mergeFieldInfos.hasNorms() ? "norms" : "no norms") + "; " + (mergeState.mergeFieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " + (mergeState.mergeFieldInfos.hasProx() ? "prox" : "no prox") + "; " + (mergeState.mergeFieldInfos.hasProx() ? "freqs" : "no freqs") + "; " + (mergeState.mergeFieldInfos.hasPointValues() ? "points" : "no points") + "; " + String.format(Locale.ROOT, "%.1f sec%s to merge segment [%.2f MB, %.2f MB/sec]", sec, pauseInfo, segmentMB, segmentMB / sec));
      } else {
        infoStream.message("IW", "skip merging fully deleted segments");
      }
    }
    if (merger.shouldMerge() == false) {
      // Merge would produce a 0-doc segment, so we do nothing except commit the merge to remove all the 0-doc segments that we "merged":
      assert merge.info.info.maxDoc() == 0;
      commitMerge(merge, mergeState);
      return 0;
    }
    assert merge.info.info.maxDoc() > 0;
    // Very important to do this before opening the reader
    // because codec must know if prox was written for
    // this segment:
    //System.out.println("merger set hasProx=" + merger.hasProx() + " seg=" + merge.info.name);
    boolean useCompoundFile;
    synchronized (this) {
      // Guard segmentInfos
      useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info, this);
    }
    if (useCompoundFile) {
      success = false;
      Collection<String> filesToRemove = merge.info.files();
      TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(mergeDirectory);
      try {
        createCompoundFile(infoStream, trackingCFSDir, merge.info.info, context);
        success = true;
      } catch (Throwable t) {
        synchronized (this) {
          if (merge.isAborted()) {
            // This can happen if rollback is called while we were building
            // our CFS -- fall through to logic below to remove the non-CFS
            // merged files:
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit merge abort exception creating compound file during merge");
}
return 0;
} else {
handleMergeException(t, merge);
}
}
} finally {
if (success == false) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception creating compound file during merge");
}
// Safe: these files must exist
deleteNewFiles(merge.info.files());
}
}
// So that, if we hit exc in deleteNewFiles (next)
// or in commitMerge (later), we close the
// per-segment readers in the finally clause below:
success = false;
synchronized (this) {
// delete new non cfs files directly: they were never
// registered with IFD
deleteNewFiles(filesToRemove);
if (merge.isAborted()) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "abort merge after building CFS");
}
// Safe: these files must exist
deleteNewFiles(merge.info.files());
return 0;
}
}
merge.info.info.setUseCompoundFile(true);
} else {
// So that, if we hit exc in commitMerge (later),
// we close the per-segment readers in the finally
// clause below:
success = false;
}
// Have codec write SegmentInfo. Must do this after
// creating CFS so that 1) .si isn't slurped into CFS,
// and 2) .si reflects useCompoundFile=true change
// above:
boolean success2 = false;
try {
codec.segmentInfoFormat().write(directory, merge.info.info, context);
success2 = true;
} finally {
if (!success2) {
// Safe: these files must exist
deleteNewFiles(merge.info.files());
}
}
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", String.format(Locale.ROOT, "merged segment size=%.3f MB vs estimate=%.3f MB", merge.info.sizeInBytes() / 1024. / 1024., merge.estimatedMergeBytes / 1024 / 1024.));
}
final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
if (poolReaders && mergedSegmentWarmer != null) {
final ReadersAndUpdates rld = readerPool.get(merge.info, true);
final SegmentReader sr = rld.getReader(IOContext.READ);
try {
mergedSegmentWarmer.warm(sr);
} finally {
synchronized (this) {
rld.release(sr);
readerPool.release(rld);
}
}
}
if (!commitMerge(merge, mergeState)) {
// aborted
return 0;
}
success = true;
} finally {
    // Readers are already closed in commitMerge if we didn't hit
    // an exc:
    if (success == false) {
      closeMergeReaders(merge, true);
    }
  }
  return merge.info.info.maxDoc();
}
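Every infoStream.message("IW", ...) call in mergeMiddle is guarded by isEnabled("IW"), so the logging costs nothing unless an InfoStream is installed on the config. A minimal way to watch merge activity, as a sketch (the watchMerges method name, stdout target, and analyzer are illustrative choices):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.util.PrintStreamInfoStream;

static IndexWriterConfig watchMerges() {
  IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
  // PrintStreamInfoStream enables every component, so each merge prints the
  // "merging ..." lines, per-segment delete counts, pause times, and the
  // final "merged segment size=... vs estimate=..." line emitted above.
  iwc.setInfoStream(new PrintStreamInfoStream(System.out));
  return iwc;
}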
Use of org.apache.lucene.util.InfoStream in project lucene-solr by apache.
From class IndexUpgrader, method parseArgs().
@SuppressForbidden(reason = "System.out required: command line tool")
static IndexUpgrader parseArgs(String[] args) throws IOException {
  String path = null;
  boolean deletePriorCommits = false;
  InfoStream out = null;
  String dirImpl = null;
  int i = 0;
  while (i < args.length) {
    String arg = args[i];
    if ("-delete-prior-commits".equals(arg)) {
      deletePriorCommits = true;
    } else if ("-verbose".equals(arg)) {
      out = new PrintStreamInfoStream(System.out);
    } else if ("-dir-impl".equals(arg)) {
      if (i == args.length - 1) {
        System.out.println("ERROR: missing value for -dir-impl option");
        System.exit(1);
      }
      i++;
      dirImpl = args[i];
    } else if (path == null) {
      path = arg;
    } else {
      printUsage();
    }
    i++;
  }
  if (path == null) {
    printUsage();
  }
  Path p = Paths.get(path);
  Directory dir = null;
  if (dirImpl == null) {
    dir = FSDirectory.open(p);
  } else {
    dir = CommandLineUtil.newFSDirectory(dirImpl, p);
  }
  return new IndexUpgrader(dir, out, deletePriorCommits);
}
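parseArgs only backs the command-line entry point; the same upgrade can be driven programmatically, with the -verbose flag corresponding to passing a non-null InfoStream. A sketch (the upgradeInPlace method name and index path are placeholders):

import java.nio.file.Paths;
import org.apache.lucene.index.IndexUpgrader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.PrintStreamInfoStream;

static void upgradeInPlace(String indexPath) throws Exception {
  try (Directory dir = FSDirectory.open(Paths.get(indexPath))) {
    // Equivalent to: java org.apache.lucene.index.IndexUpgrader -verbose <path>
    new IndexUpgrader(dir, new PrintStreamInfoStream(System.out), false).upgrade();
  }
}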