Use of org.apache.lucene.store.IOContext in project lucene-solr by apache.
The class TermVectorsConsumer, method initTermVectorsWriter:
void initTermVectorsWriter() throws IOException {
  if (writer == null) {
    IOContext context = new IOContext(new FlushInfo(docWriter.getNumDocsInRAM(), docWriter.bytesUsed()));
    writer = docWriter.codec.termVectorsFormat().vectorsWriter(docWriter.directory, docWriter.getSegmentInfo(), context);
    lastDocID = 0;
  }
}
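As a point of comparison, here is a minimal standalone sketch of the same pattern: wrap a FlushInfo in an IOContext and hand it to a Directory when creating an output. The document count, byte size, directory path, and file name are hypothetical, chosen only to illustrate the constructor.

import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

public class FlushContextExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical flush: 1,000 buffered docs occupying ~4 MB of RAM.
    FlushInfo flushInfo = new FlushInfo(1000, 4L << 20);
    IOContext context = new IOContext(flushInfo);
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"));
         IndexOutput out = dir.createOutput("_demo.dat", context)) {
      // The context is a hint: it tells the Directory roughly how large
      // the file will be and that it is being written as part of a flush.
      out.writeVInt(42);
    }
  }
}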
Use of org.apache.lucene.store.IOContext in project lucene-solr by apache.
The class TestDoc, method merge:
private SegmentCommitInfo merge(Directory dir, SegmentCommitInfo si1, SegmentCommitInfo si2, String merged, boolean useCompoundFile) throws Exception {
  IOContext context = newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1)));
  SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, context);
  SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, context);

  final Codec codec = Codec.getDefault();
  TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
  final SegmentInfo si = new SegmentInfo(si1.info.dir, Version.LATEST, null, merged, -1, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);

  SegmentMerger merger = new SegmentMerger(Arrays.<CodecReader>asList(r1, r2), si, InfoStream.getDefault(), trackingDir, new FieldInfos.FieldNumbers(), context);

  MergeState mergeState = merger.merge();
  r1.close();
  r2.close();

  si.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));

  if (useCompoundFile) {
    Collection<String> filesToDelete = si.files();
    codec.compoundFormat().write(dir, si, context);
    si.setUseCompoundFile(true);
    for (String name : filesToDelete) {
      si1.info.dir.deleteFile(name);
    }
  }

  return new SegmentCommitInfo(si, 0, -1L, -1L, -1L);
}
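The test above wraps a MergeInfo rather than a FlushInfo. A hedged sketch of that constructor follows; its signature is MergeInfo(totalMaxDoc, estimatedMergeBytes, isExternal, mergeMaxNumSegments), where -1 marks a value the caller cannot estimate, exactly as in the test. The sized variant uses hypothetical numbers.

import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.MergeInfo;

public class MergeContextExample {
  public static void main(String[] args) {
    // All hints unknown: -1 for doc count, merge size, and segment cap.
    IOContext unsized = new IOContext(new MergeInfo(-1, -1, false, -1));

    // A sized variant with hypothetical numbers: ~10,000 docs and ~64 MB
    // of input, not an external (addIndexes) merge:
    IOContext sized = new IOContext(new MergeInfo(10_000, 64L << 20, false, -1));

    System.out.println(unsized + " / " + sized);
  }
}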
Use of org.apache.lucene.store.IOContext in project lucene-solr by apache.
The class IndexWriter, method mergeMiddle:
/** Does the actual (time-consuming) work of the merge,
 *  but without holding synchronized lock on IndexWriter
 *  instance */
private int mergeMiddle(MergePolicy.OneMerge merge, MergePolicy mergePolicy) throws IOException {
  merge.checkAborted();

  Directory mergeDirectory = config.getMergeScheduler().wrapForMerge(merge, directory);
  List<SegmentCommitInfo> sourceSegments = merge.segments;

  IOContext context = new IOContext(merge.getStoreMergeInfo());

  final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(mergeDirectory);

  if (infoStream.isEnabled("IW")) {
    infoStream.message("IW", "merging " + segString(merge.segments));
  }

  merge.readers = new ArrayList<>(sourceSegments.size());

  // This is try/finally to make sure merger's readers are
  // closed:
  boolean success = false;
  try {
    int segUpto = 0;
    while (segUpto < sourceSegments.size()) {
      final SegmentCommitInfo info = sourceSegments.get(segUpto);

      // Hold onto the "live" reader; we will use this to
      // commit merged deletes
      final ReadersAndUpdates rld = readerPool.get(info, true);

      // Carefully pull the most recent live docs and reader
      SegmentReader reader;
      final Bits liveDocs;
      final int delCount;

      synchronized (this) {
        // Must sync to ensure BufferedDeletesStream cannot change liveDocs,
        // pendingDeleteCount and field updates while we pull a copy:
        reader = rld.getReaderForMerge(context);
        liveDocs = rld.getReadOnlyLiveDocs();
        delCount = rld.getPendingDeleteCount() + info.getDelCount();

        assert reader != null;
        assert rld.verifyDocCounts();

        if (infoStream.isEnabled("IW")) {
          if (rld.getPendingDeleteCount() != 0) {
            infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount());
          } else if (info.getDelCount() != 0) {
            infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount());
          } else {
            infoStream.message("IW", "seg=" + segString(info) + " no deletes");
          }
        }
      }

      // make a new reader with updated live docs and del count.
      if (reader.numDeletedDocs() != delCount) {
        // beware of zombies
        assert delCount > reader.numDeletedDocs();

        SegmentReader newReader;

        synchronized (this) {
          // We must also sync on IW here, because another thread could be writing
          // new DV updates / remove old gen field infos files causing FNFE:
          newReader = new SegmentReader(info, reader, liveDocs, info.info.maxDoc() - delCount);
        }

        boolean released = false;
        try {
          rld.release(reader);
          released = true;
        } finally {
          if (!released) {
            newReader.decRef();
          }
        }

        reader = newReader;
      }

      merge.readers.add(reader);
      assert delCount <= info.info.maxDoc() : "delCount=" + delCount + " info.maxDoc=" + info.info.maxDoc() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
      segUpto++;
    }

    // System.out.println("[" + Thread.currentThread().getName() + "] IW.mergeMiddle: merging " + merge.getMergeReaders());

    // Let the merge wrap readers
    List<CodecReader> mergeReaders = new ArrayList<>();
    for (SegmentReader reader : merge.readers) {
      CodecReader wrappedReader = merge.wrapForMerge(reader);
      validateMergeReader(wrappedReader);
      mergeReaders.add(wrappedReader);
    }
    final SegmentMerger merger = new SegmentMerger(mergeReaders, merge.info.info, infoStream, dirWrapper, globalFieldNumberMap, context);

    merge.checkAborted();

    merge.mergeStartNS = System.nanoTime();

    // This is where all the work happens:
    if (merger.shouldMerge()) {
      merger.merge();
    }

    MergeState mergeState = merger.mergeState;
    assert mergeState.segmentInfo == merge.info.info;
    merge.info.info.setFiles(new HashSet<>(dirWrapper.getCreatedFiles()));

    if (infoStream.isEnabled("IW")) {
      if (merger.shouldMerge()) {
        String pauseInfo = merge.getMergeProgress().getPauseTimes().entrySet()
            .stream()
            .filter((e) -> e.getValue() > 0)
            .map((e) -> String.format(Locale.ROOT, "%.1f sec %s", e.getValue() / 1000000000., e.getKey().name().toLowerCase(Locale.ROOT)))
            .collect(Collectors.joining(", "));
        if (!pauseInfo.isEmpty()) {
          pauseInfo = " (" + pauseInfo + ")";
        }

        long t1 = System.nanoTime();
        double sec = (t1 - merge.mergeStartNS) / 1000000000.;
        double segmentMB = (merge.info.sizeInBytes() / 1024. / 1024.);
infoStream.message("IW", "merge codec=" + codec + " maxDoc=" + merge.info.info.maxDoc() + "; merged segment has " + (mergeState.mergeFieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " + (mergeState.mergeFieldInfos.hasNorms() ? "norms" : "no norms") + "; " + (mergeState.mergeFieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " + (mergeState.mergeFieldInfos.hasProx() ? "prox" : "no prox") + "; " + (mergeState.mergeFieldInfos.hasProx() ? "freqs" : "no freqs") + "; " + (mergeState.mergeFieldInfos.hasPointValues() ? "points" : "no points") + "; " + String.format(Locale.ROOT, "%.1f sec%s to merge segment [%.2f MB, %.2f MB/sec]", sec, pauseInfo, segmentMB, segmentMB / sec));
      } else {
        infoStream.message("IW", "skip merging fully deleted segments");
      }
    }

    if (merger.shouldMerge() == false) {
      // Merge would produce a 0-doc segment, so we do nothing except
      // commit the merge to remove all the 0-doc segments that we "merged":
      assert merge.info.info.maxDoc() == 0;
      commitMerge(merge, mergeState);
      return 0;
    }

    assert merge.info.info.maxDoc() > 0;

    // Very important to do this before opening the reader
    // because codec must know if prox was written for
    // this segment:
    //System.out.println("merger set hasProx=" + merger.hasProx() + " seg=" + merge.info.name);
    boolean useCompoundFile;
    synchronized (this) {
      // Guard segmentInfos
      useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info, this);
    }

    if (useCompoundFile) {
      success = false;

      Collection<String> filesToRemove = merge.info.files();
      TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(mergeDirectory);
      try {
        createCompoundFile(infoStream, trackingCFSDir, merge.info.info, context);
        success = true;
      } catch (Throwable t) {
        synchronized (this) {
          if (merge.isAborted()) {
            // This can happen if rollback is called while we were building
            // our CFS -- fall through to logic below to remove the non-CFS
            // merged files:
            if (infoStream.isEnabled("IW")) {
              infoStream.message("IW", "hit merge abort exception creating compound file during merge");
            }
            return 0;
          } else {
            handleMergeException(t, merge);
          }
        }
      } finally {
        if (success == false) {
          if (infoStream.isEnabled("IW")) {
            infoStream.message("IW", "hit exception creating compound file during merge");
          }
          // Safe: these files must exist
          deleteNewFiles(merge.info.files());
        }
      }

      // So that, if we hit exc in deleteNewFiles (next)
      // or in commitMerge (later), we close the
      // per-segment readers in the finally clause below:
      success = false;

      synchronized (this) {
        // delete new non cfs files directly: they were never
        // registered with IFD
        deleteNewFiles(filesToRemove);

        if (merge.isAborted()) {
          if (infoStream.isEnabled("IW")) {
            infoStream.message("IW", "abort merge after building CFS");
          }
          // Safe: these files must exist
          deleteNewFiles(merge.info.files());
          return 0;
        }
      }

      merge.info.info.setUseCompoundFile(true);
    } else {
      // So that, if we hit exc in commitMerge (later),
      // we close the per-segment readers in the finally
      // clause below:
      success = false;
    }

    // Have codec write SegmentInfo. Must do this after
    // creating CFS so that 1) .si isn't slurped into CFS,
    // and 2) .si reflects useCompoundFile=true change
    // above:
    boolean success2 = false;
    try {
      codec.segmentInfoFormat().write(directory, merge.info.info, context);
      success2 = true;
    } finally {
      if (!success2) {
        // Safe: these files must exist
        deleteNewFiles(merge.info.files());
      }
    }

    if (infoStream.isEnabled("IW")) {
      infoStream.message("IW", String.format(Locale.ROOT, "merged segment size=%.3f MB vs estimate=%.3f MB", merge.info.sizeInBytes() / 1024. / 1024., merge.estimatedMergeBytes / 1024 / 1024.));
    }

    final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
    if (poolReaders && mergedSegmentWarmer != null) {
      final ReadersAndUpdates rld = readerPool.get(merge.info, true);
      final SegmentReader sr = rld.getReader(IOContext.READ);
      try {
        mergedSegmentWarmer.warm(sr);
      } finally {
        synchronized (this) {
          rld.release(sr);
          readerPool.release(rld);
        }
      }
    }

    if (!commitMerge(merge, mergeState)) {
      // aborted
      return 0;
    }

    success = true;
  } finally {
    // Readers are already closed in commitMerge if we didn't hit an exc:
    if (success == false) {
      closeMergeReaders(merge, true);
    }
  }

  return merge.info.info.maxDoc();
}
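Note that the warming step above opens the merged segment's reader with the predefined IOContext.READ constant rather than a constructed context. A minimal sketch of the built-in constants; the directory path and file name are hypothetical, and the file must already exist for openInput to succeed.

import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

public class ReadContextExample {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"))) {
      // IOContext.DEFAULT: general-purpose context.
      // IOContext.READ: ordinary reads, as in the reader-warming call above.
      // IOContext.READONCE: the file will be read fully once and then closed,
      // which lets a Directory implementation pick a cheaper access strategy.
      try (IndexInput in = dir.openInput("_demo.dat", IOContext.READONCE)) {
        System.out.println("file length: " + in.length());
      }
    }
  }
}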
Use of org.apache.lucene.store.IOContext in project lucene-solr by apache.
The class IndexWriter, method addIndexes:
/**
 * Adds all segments from an array of indexes into this index.
 *
 * <p>This may be used to parallelize batch indexing. A large document
 * collection can be broken into sub-collections. Each sub-collection can be
 * indexed in parallel, on a different thread, process or machine. The
 * complete index can then be created by merging sub-collection indexes
 * with this method.
 *
 * <p>
 * <b>NOTE:</b> this method acquires the write lock in
 * each directory, to ensure that no {@code IndexWriter}
 * is currently open or tries to open while this is
 * running.
 *
 * <p>This method is transactional in how Exceptions are
 * handled: it does not commit a new segments_N file until
 * all indexes are added. This means if an Exception
 * occurs (for example disk full), then either no indexes
 * will have been added or they all will have been.
 *
 * <p>Note that this requires temporary free space in the
 * {@link Directory} up to 2X the sum of all input indexes
 * (including the starting index). If readers/searchers
 * are open against the starting index, then temporary
 * free space required will be higher by the size of the
 * starting index (see {@link #forceMerge(int)} for details).
 *
 * <p>This requires this index not be among those to be added.
 *
 * <p>All added indexes must have been created by the same
 * Lucene version as this index.
 *
 * @return The <a href="#sequence_number">sequence number</a>
 * for this operation
 *
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 * @throws IllegalArgumentException if addIndexes would cause
 * the index to exceed {@link #MAX_DOCS}, or if the incoming
 * index sort does not match this index's index sort
 */
public long addIndexes(Directory... dirs) throws IOException {
  ensureOpen();

  noDupDirs(dirs);

  List<Lock> locks = acquireWriteLocks(dirs);

  Sort indexSort = config.getIndexSort();

  boolean successTop = false;
  long seqNo;

  try {
    if (infoStream.isEnabled("IW")) {
      infoStream.message("IW", "flush at addIndexes(Directory...)");
    }

    flush(false, true);

    List<SegmentCommitInfo> infos = new ArrayList<>();

    // long so we can detect int overflow:
    long totalMaxDoc = 0;
    List<SegmentInfos> commits = new ArrayList<>(dirs.length);
    for (Directory dir : dirs) {
      if (infoStream.isEnabled("IW")) {
        infoStream.message("IW", "addIndexes: process directory " + dir);
      }
      // read infos from dir
      SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
      if (segmentInfos.getIndexCreatedVersionMajor() != sis.getIndexCreatedVersionMajor()) {
        throw new IllegalArgumentException("Cannot use addIndexes(Directory) with indexes that have been created " +
            "by a different Lucene version. The current index was generated by Lucene " + segmentInfos.getIndexCreatedVersionMajor() +
            " while one of the directories contains an index that was generated with Lucene " + sis.getIndexCreatedVersionMajor());
      }
      totalMaxDoc += sis.totalMaxDoc();
      commits.add(sis);
    }

    // Best-effort up front check:
    testReserveDocs(totalMaxDoc);

    boolean success = false;
    try {
      for (SegmentInfos sis : commits) {
        for (SegmentCommitInfo info : sis) {
          assert !infos.contains(info) : "dup info dir=" + info.info.dir + " name=" + info.info.name;

          Sort segmentIndexSort = info.info.getIndexSort();

          if (indexSort != null && segmentIndexSort != null && indexSort.equals(segmentIndexSort) == false) {
            // TODO: we could make this smarter, e.g. if the incoming indexSort is congruent with our sort ("starts with") then it's OK
            throw new IllegalArgumentException("cannot change index sort from " + segmentIndexSort + " to " + indexSort);
          }

          String newSegName = newSegmentName();

          if (infoStream.isEnabled("IW")) {
            infoStream.message("IW", "addIndexes: process segment origName=" + info.info.name + " newName=" + newSegName + " info=" + info);
          }

          IOContext context = new IOContext(new FlushInfo(info.info.maxDoc(), info.sizeInBytes()));

          FieldInfos fis = readFieldInfos(info);
          for (FieldInfo fi : fis) {
            // This will throw exceptions if any of the incoming fields have an illegal schema change:
            globalFieldNumberMap.addOrGet(fi.name, fi.number, fi.getDocValuesType(), fi.getPointDimensionCount(), fi.getPointNumBytes());
          }
          infos.add(copySegmentAsIs(info, newSegName, context));
        }
      }
      success = true;
    } finally {
      if (!success) {
        for (SegmentCommitInfo sipc : infos) {
          // Safe: these files must exist
          deleteNewFiles(sipc.files());
        }
      }
    }

    synchronized (this) {
      success = false;
      try {
        ensureOpen();

        // Now reserve the docs, just before we update SIS:
        reserveDocs(totalMaxDoc);

        seqNo = docWriter.deleteQueue.getNextSequenceNumber();

        success = true;
      } finally {
        if (!success) {
          for (SegmentCommitInfo sipc : infos) {
            // Safe: these files must exist
            deleteNewFiles(sipc.files());
          }
        }
      }
      segmentInfos.addAll(infos);
      checkpoint();
    }

    successTop = true;

  } catch (VirtualMachineError tragedy) {
    tragicEvent(tragedy, "addIndexes(Directory...)");
    // dead code but javac disagrees:
    seqNo = -1;
  } finally {
    if (successTop) {
      IOUtils.close(locks);
    } else {
      IOUtils.closeWhileHandlingException(locks);
    }
  }

  maybeMerge();

  return seqNo;
}
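addIndexes sizes each copy context from the segment being copied, using its maxDoc and on-disk size. A hedged sketch of the same idea applied to Directory.copyFrom directly; the segment stats, directory paths, and file names are hypothetical, and the source file must exist for the copy to succeed.

import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;

public class CopyContextExample {
  public static void main(String[] args) throws Exception {
    try (Directory src = FSDirectory.open(Paths.get("/tmp/src-index"));
         Directory dst = FSDirectory.open(Paths.get("/tmp/dst-index"))) {
      // Hypothetical segment stats: 50,000 docs, ~12 MB on disk.
      IOContext copyContext = new IOContext(new FlushInfo(50_000, 12L << 20));
      // Copy a (hypothetical) stored-fields file under a new segment name,
      // passing the context so the Directory knows the expected I/O profile.
      dst.copyFrom(src, "_3.fdt", "_7.fdt", copyContext);
    }
  }
}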
Use of org.apache.lucene.store.IOContext in project lucene-solr by apache.
The class ReadersAndUpdates, method handleNumericDVUpdates:
@SuppressWarnings("synthetic-access")
private void handleNumericDVUpdates(FieldInfos infos, Map<String, NumericDocValuesFieldUpdates> updates, Directory dir, DocValuesFormat dvFormat, final SegmentReader reader, Map<Integer, Set<String>> fieldFiles) throws IOException {
  for (Entry<String, NumericDocValuesFieldUpdates> e : updates.entrySet()) {
    final String field = e.getKey();
    final NumericDocValuesFieldUpdates fieldUpdates = e.getValue();

    final long nextDocValuesGen = info.getNextDocValuesGen();
    final String segmentSuffix = Long.toString(nextDocValuesGen, Character.MAX_RADIX);
    final long estUpdatesSize = fieldUpdates.ramBytesPerDoc() * info.info.maxDoc();
    final IOContext updatesContext = new IOContext(new FlushInfo(info.info.maxDoc(), estUpdatesSize));
    final FieldInfo fieldInfo = infos.fieldInfo(field);
    assert fieldInfo != null;
    fieldInfo.setDocValuesGen(nextDocValuesGen);
    final FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { fieldInfo });
    // separately also track which files were created for this gen
    final TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    final SegmentWriteState state = new SegmentWriteState(null, trackingDir, info.info, fieldInfos, null, updatesContext, segmentSuffix);

    try (final DocValuesConsumer fieldsConsumer = dvFormat.fieldsConsumer(state)) {
      // write the numeric updates to a new gen'd docvalues file
      fieldsConsumer.addNumericField(fieldInfo, new EmptyDocValuesProducer() {
        @Override
        public NumericDocValues getNumeric(FieldInfo fieldInfoIn) throws IOException {
          if (fieldInfoIn != fieldInfo) {
            throw new IllegalArgumentException("wrong fieldInfo");
          }
          final int maxDoc = reader.maxDoc();
          final NumericDocValuesFieldUpdates.Iterator updatesIter = fieldUpdates.iterator();
          final NumericDocValues currentValues = reader.getNumericDocValues(field);
          updatesIter.reset();

          // Merge sort of the original doc values with updated doc values:
          return new NumericDocValues() {
            // merged docID
            private int docIDOut = -1;

            // docID from our original doc values
            private int docIDIn = -1;

            // docID from our updates
            private int updateDocID = -1;

            private long value;

            @Override
            public int docID() {
              return docIDOut;
            }

            @Override
            public int advance(int target) {
              throw new UnsupportedOperationException();
            }

            @Override
            public boolean advanceExact(int target) throws IOException {
              throw new UnsupportedOperationException();
            }

            @Override
            public long cost() {
              // TODO
              return 0;
            }

            @Override
            public long longValue() {
              return value;
            }

            @Override
            public int nextDoc() throws IOException {
              if (docIDIn == docIDOut) {
                if (currentValues == null) {
                  docIDIn = NO_MORE_DOCS;
                } else {
                  docIDIn = currentValues.nextDoc();
                }
              }
              if (updateDocID == docIDOut) {
                updateDocID = updatesIter.nextDoc();
              }
              if (docIDIn < updateDocID) {
                // no update to this doc
                docIDOut = docIDIn;
                value = currentValues.longValue();
              } else {
                docIDOut = updateDocID;
                if (docIDOut != NO_MORE_DOCS) {
                  value = updatesIter.value();
                }
              }
              return docIDOut;
            }
          };
        }
      });
    }
    info.advanceDocValuesGen();
    assert !fieldFiles.containsKey(fieldInfo.number);
    fieldFiles.put(fieldInfo.number, trackingDir.getCreatedFiles());
  }
}
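The updates context above is sized by multiplying the per-document RAM footprint of the pending updates by the segment's maxDoc. A small sketch of that estimate with hypothetical numbers (the per-doc cost and segment size are assumptions, not values from the method above):

import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;

public class UpdatesContextExample {
  public static void main(String[] args) {
    // Hypothetical: a numeric doc-values update costing ~8 bytes per doc
    // across a 100,000-doc segment.
    long ramBytesPerDoc = 8;
    int maxDoc = 100_000;
    long estUpdatesSize = ramBytesPerDoc * maxDoc;

    // Wrap the estimate in a FlushInfo-backed IOContext for the write:
    IOContext updatesContext = new IOContext(new FlushInfo(maxDoc, estUpdatesSize));
    System.out.println("estimated update file size: " + estUpdatesSize + " bytes");
  }
}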