Use of org.apache.lucene.index.SegmentCommitInfo in the elasticsearch project (by elastic),
from the class Lucene, method files().
/**
 * Returns an iterable over every file referenced by the given {@link SegmentInfos}:
 * the segments_N file itself plus the files of each individual segment commit.
 */
public static Iterable<String> files(SegmentInfos infos) throws IOException {
final List<Collection<String>> fileGroups = new ArrayList<>();
// the segments_N generation file is not listed by any SegmentCommitInfo, so add it explicitly
fileGroups.add(Collections.singleton(infos.getSegmentsFileName()));
for (SegmentCommitInfo commitInfo : infos) {
fileGroups.add(commitInfo.files());
}
return Iterables.flatten(fileGroups);
}
Use of org.apache.lucene.index.SegmentCommitInfo in the elasticsearch project (by elastic),
from the class Engine, method getSegmentInfo().
/**
 * Builds the per-segment stats array for this engine.
 *
 * <p>First collects stats for every segment visible to the current searcher
 * (marking them {@code search = true}), then correlates them with the last
 * committed {@link SegmentInfos}: segments present only in the commit are added
 * with {@code search = false}, and segments present in both are flagged
 * {@code committed = true}. The result is sorted by segment generation.
 *
 * @param lastCommittedSegmentInfos the last committed segment infos, or {@code null} if none
 * @param verbose whether to attach the per-segment RAM accounting tree
 * @return all known segments, sorted by ascending generation
 */
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
ensureOpen();
Map<String, Segment> segments = new HashMap<>();
// first, go over and compute the search ones...
Searcher searcher = acquireSearcher("segments");
try {
for (LeafReaderContext reader : searcher.reader().leaves()) {
// hoisted: the original resolved segmentReader(reader.reader()) twice per iteration
final SegmentReader segmentReader = segmentReader(reader.reader());
SegmentCommitInfo info = segmentReader.getSegmentInfo();
assert !segments.containsKey(info.info.name);
Segment segment = new Segment(info.info.name);
segment.search = true;
segment.docCount = reader.reader().numDocs();
segment.delDocCount = reader.reader().numDeletedDocs();
segment.version = info.info.getVersion();
segment.compound = info.info.getUseCompoundFile();
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
// best-effort: size stays at its default if the directory listing fails
logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
segment.memoryInBytes = segmentReader.ramBytesUsed();
if (verbose) {
segment.ramTree = Accountables.namedAccountable("root", segmentReader);
}
// TODO: add more fine grained mem stats values to per segment info here
segments.put(info.info.name, segment);
}
} finally {
searcher.close();
}
// now, correlate or add the committed ones...
if (lastCommittedSegmentInfos != null) {
for (SegmentCommitInfo info : lastCommittedSegmentInfos) {
Segment segment = segments.get(info.info.name);
if (segment == null) {
segment = new Segment(info.info.name);
segment.search = false;
segment.committed = true;
segment.docCount = info.info.maxDoc();
segment.delDocCount = info.getDelCount();
segment.version = info.info.getVersion();
segment.compound = info.info.getUseCompoundFile();
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
}
segments.put(info.info.name, segment);
} else {
segment.committed = true;
}
}
}
Segment[] segmentsArr = segments.values().toArray(new Segment[segments.size()]);
// Comparator.comparingLong avoids the original (int)(long - long) subtraction, which
// truncates/overflows and can mis-order segments whose generations differ by more
// than Integer.MAX_VALUE.
Arrays.sort(segmentsArr, Comparator.comparingLong(Segment::getGeneration));
return segmentsArr;
}
Use of org.apache.lucene.index.SegmentCommitInfo in the lucene-solr project (by apache),
from the class SegmentsInfoRequestHandler, method getMergeCandidatesNames().
/**
 * Returns the names of the segments that the core's configured merge policy would
 * select for merging right now, using an EXPLICIT merge trigger.
 */
private List<String> getMergeCandidatesNames(SolrQueryRequest req, SegmentInfos infos) throws IOException {
List<String> candidateNames = new ArrayList<String>();
RefCounted<IndexWriter> writerRef = req.getCore().getSolrCoreState().getIndexWriter(req.getCore());
try {
IndexWriter writer = writerRef.get();
// ask the configured merge policy which merges it would run against these infos
MergePolicy mergePolicy = writer.getConfig().getMergePolicy();
MergeSpecification spec = mergePolicy.findMerges(MergeTrigger.EXPLICIT, infos, writer);
if (spec != null && spec.merges != null && !spec.merges.isEmpty()) {
for (OneMerge merge : spec.merges) {
//TODO: add merge grouping
for (SegmentCommitInfo candidate : merge.segments) {
candidateNames.add(candidate.info.name);
}
}
}
return candidateNames;
} finally {
// release the ref-counted writer even if findMerges throws
writerRef.decref();
}
}
Use of org.apache.lucene.index.SegmentCommitInfo in the lucene-solr project (by apache),
from the class SegmentsInfoRequestHandler, method getSegmentsInfo().
/**
 * Collects per-segment details for the latest index commit, flagging each segment
 * that the current merge policy would pick as a merge candidate.
 */
private SimpleOrderedMap<Object> getSegmentsInfo(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
SolrIndexSearcher searcher = req.getSearcher();
SegmentInfos infos = SegmentInfos.readLatestCommit(searcher.getIndexReader().directory());
List<String> mergeCandidates = getMergeCandidatesNames(req, infos);
SimpleOrderedMap<Object> result = new SimpleOrderedMap<>();
for (SegmentCommitInfo commitInfo : infos) {
SimpleOrderedMap<Object> details = getSegmentInfo(commitInfo);
if (mergeCandidates.contains(commitInfo.info.name)) {
details.add("mergeCandidate", true);
}
// keyed by the segment's name as reported inside the detail map
result.add((String) details.get(NAME), details);
}
return result;
}
Use of org.apache.lucene.index.SegmentCommitInfo in the lucene-solr project (by apache),
from the class PreCopyMergedSegmentWarmer, method warm().
/**
 * Pre-copies a freshly merged segment's files to replicas before the merge is
 * made visible: gathers local metadata for every file in the segment, asks the
 * primary to push them, then records the file names as finished.
 *
 * @param reader the merged segment's reader (must be a {@link SegmentReader})
 * @throws IOException if reading file metadata or pre-copying fails
 */
@Override
public void warm(LeafReader reader) throws IOException {
long startNS = System.nanoTime();
final SegmentCommitInfo info = ((SegmentReader) reader).getSegmentInfo();
//System.out.println("TEST: warm merged segment files " + info);
Map<String, FileMetaData> filesMetaData = new HashMap<>();
for (String fileName : info.files()) {
FileMetaData metaData = primary.readLocalFileMetaData(fileName);
assert metaData != null;
assert filesMetaData.containsKey(fileName) == false;
filesMetaData.put(fileName, metaData);
}
primary.preCopyMergedSegmentFiles(info, filesMetaData);
// Pass info as a %s argument instead of concatenating it into the format string:
// a '%' in the segment description would otherwise throw IllegalFormatException.
// Also divide by 1024. first so the MB value is computed in floating point rather
// than truncating to whole KiB via integer division.
primary.message(String.format(Locale.ROOT, "top: done warm merge %s: took %.3f sec, %.1f MB", info, (System.nanoTime() - startNS) / 1000000000., info.sizeInBytes() / 1024. / 1024.));
primary.finishedMergedFiles.addAll(filesMetaData.keySet());
}
Aggregations