Use of org.apache.lucene.index.SegmentReader in project elasticsearch by elastic.
From the class CodecTests, method assertCompressionEquals:
// write some docs with it, then inspect the .si file to verify which compression mode was used
private void assertCompressionEquals(Mode expected, Codec actual) throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig(null);
    iwc.setCodec(actual);
    IndexWriter iw = new IndexWriter(dir, iwc);
    iw.addDocument(new Document());
    iw.commit();
    iw.close();
    DirectoryReader ir = DirectoryReader.open(dir);
    SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader();
    String v = sr.getSegmentInfo().info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY);
    assertNotNull(v);
    assertEquals(expected, Mode.valueOf(v));
    ir.close();
    dir.close();
}
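In the Elasticsearch test suite this helper is typically driven by codecs looked up from a codec service. The sketch below is an illustrative invocation only; codecService and the codec names are assumptions, not part of the snippet above.

    // hypothetical usage; codecService and the codec names are assumptions for illustration
    assertCompressionEquals(Mode.BEST_SPEED, codecService.codec("default"));
    assertCompressionEquals(Mode.BEST_COMPRESSION, codecService.codec("best_compression"));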
Use of org.apache.lucene.index.SegmentReader in project elasticsearch by elastic.
From the class Engine, method getSegmentInfo:
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
    ensureOpen();
    Map<String, Segment> segments = new HashMap<>();
    // first, go over and compute the search ones...
    Searcher searcher = acquireSearcher("segments");
    try {
        for (LeafReaderContext reader : searcher.reader().leaves()) {
            SegmentCommitInfo info = segmentReader(reader.reader()).getSegmentInfo();
            assert !segments.containsKey(info.info.name);
            Segment segment = new Segment(info.info.name);
            segment.search = true;
            segment.docCount = reader.reader().numDocs();
            segment.delDocCount = reader.reader().numDeletedDocs();
            segment.version = info.info.getVersion();
            segment.compound = info.info.getUseCompoundFile();
            try {
                segment.sizeInBytes = info.sizeInBytes();
            } catch (IOException e) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
            }
            final SegmentReader segmentReader = segmentReader(reader.reader());
            segment.memoryInBytes = segmentReader.ramBytesUsed();
            if (verbose) {
                segment.ramTree = Accountables.namedAccountable("root", segmentReader);
            }
            // TODO: add more fine grained mem stats values to per segment info here
            segments.put(info.info.name, segment);
        }
    } finally {
        searcher.close();
    }
    // now, correlate or add the committed ones...
    if (lastCommittedSegmentInfos != null) {
        SegmentInfos infos = lastCommittedSegmentInfos;
        for (SegmentCommitInfo info : infos) {
            Segment segment = segments.get(info.info.name);
            if (segment == null) {
                segment = new Segment(info.info.name);
                segment.search = false;
                segment.committed = true;
                segment.docCount = info.info.maxDoc();
                segment.delDocCount = info.getDelCount();
                segment.version = info.info.getVersion();
                segment.compound = info.info.getUseCompoundFile();
                try {
                    segment.sizeInBytes = info.sizeInBytes();
                } catch (IOException e) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
                }
                segments.put(info.info.name, segment);
            } else {
                segment.committed = true;
            }
        }
    }
    Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
    Arrays.sort(segmentsArr, new Comparator<Segment>() {
        @Override
        public int compare(Segment o1, Segment o2) {
            return (int) (o1.getGeneration() - o2.getGeneration());
        }
    });
    return segmentsArr;
}
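The segmentReader(...) helper used above is not shown in this snippet. Below is a minimal sketch of such an unwrapping helper, assuming every leaf reader is either a SegmentReader or a FilterLeafReader chain that bottoms out in one; the real Elasticsearch helper may differ.

    private static SegmentReader segmentReader(LeafReader reader) {
        // strip any FilterLeafReader layers down to the underlying reader
        LeafReader unwrapped = FilterLeafReader.unwrap(reader);
        if (unwrapped instanceof SegmentReader) {
            return (SegmentReader) unwrapped;
        }
        throw new IllegalStateException("cannot extract a SegmentReader from [" + reader + "]");
    }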
Use of org.apache.lucene.index.SegmentReader in project lucene-solr by apache.
From the class AnalyzingInfixSuggester, method ramBytesUsed:
@Override
public long ramBytesUsed() {
    long mem = RamUsageEstimator.shallowSizeOf(this);
    try {
        if (searcherMgr != null) {
            SearcherManager mgr;
            IndexSearcher searcher;
            synchronized (searcherMgrLock) {
                // acquire & release on same SearcherManager, via local reference
                mgr = searcherMgr;
                searcher = mgr.acquire();
            }
            try {
                for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
                    LeafReader reader = FilterLeafReader.unwrap(context.reader());
                    if (reader instanceof SegmentReader) {
                        // cast the unwrapped reader, i.e. the one the instanceof check was made on
                        mem += ((SegmentReader) reader).ramBytesUsed();
                    }
                }
            } finally {
                mgr.release(searcher);
            }
        }
        return mem;
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
}
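Because the method acquires and releases the searcher internally, callers can poll it at any time to report the suggester's heap footprint. A hedged usage sketch, where suggester is an already-built AnalyzingInfixSuggester (an assumption for illustration):

    // hypothetical usage; "suggester" is an assumption for illustration
    long heapBytes = suggester.ramBytesUsed();
    System.out.println("suggester heap usage: " + heapBytes + " bytes");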
Use of org.apache.lucene.index.SegmentReader in project lucene-solr by apache.
From the class AnalyzingInfixSuggester, method getChildResources:
@Override
public Collection<Accountable> getChildResources() {
    List<Accountable> resources = new ArrayList<>();
    try {
        if (searcherMgr != null) {
            SearcherManager mgr;
            IndexSearcher searcher;
            synchronized (searcherMgrLock) {
                // acquire & release on same SearcherManager, via local reference
                mgr = searcherMgr;
                searcher = mgr.acquire();
            }
            try {
                for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
                    LeafReader reader = FilterLeafReader.unwrap(context.reader());
                    if (reader instanceof SegmentReader) {
                        resources.add(Accountables.namedAccountable("segment", (SegmentReader) reader));
                    }
                }
            } finally {
                mgr.release(searcher);
            }
        }
        return Collections.unmodifiableList(resources);
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
}
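Each returned Accountable wraps one SegmentReader, so the list can be rendered with Lucene's Accountables utilities. A hedged usage sketch, where suggester is again an assumption for illustration:

    // hypothetical usage; prints one memory-accounting tree per searchable segment
    for (Accountable segment : suggester.getChildResources()) {
        System.out.println(Accountables.toString(segment));
    }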
Use of org.apache.lucene.index.SegmentReader in project lucene-solr by apache.
From the class PreCopyMergedSegmentWarmer, method warm:
@Override
public void warm(LeafReader reader) throws IOException {
    long startNS = System.nanoTime();
    final SegmentCommitInfo info = ((SegmentReader) reader).getSegmentInfo();
    //System.out.println("TEST: warm merged segment files " + info);
    Map<String, FileMetaData> filesMetaData = new HashMap<>();
    for (String fileName : info.files()) {
        FileMetaData metaData = primary.readLocalFileMetaData(fileName);
        assert metaData != null;
        assert filesMetaData.containsKey(fileName) == false;
        filesMetaData.put(fileName, metaData);
    }
    primary.preCopyMergedSegmentFiles(info, filesMetaData);
    primary.message(String.format(Locale.ROOT, "top: done warm merge " + info + ": took %.3f sec, %.1f MB", (System.nanoTime() - startNS) / 1000000000., info.sizeInBytes() / 1024 / 1024.));
    primary.finishedMergedFiles.addAll(filesMetaData.keySet());
}
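A warmer like this only runs if it is installed on the writer configuration, so that newly merged segments are pre-copied to replicas before they become searchable. A minimal registration sketch, assuming dir, analyzer, and primary already exist; the actual PreCopyMergedSegmentWarmer constructor arguments are an assumption.

    // hedged wiring sketch; dir, analyzer, and primary are assumptions for illustration
    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setMergedSegmentWarmer(new PreCopyMergedSegmentWarmer(primary));
    IndexWriter writer = new IndexWriter(dir, iwc);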