use of org.apache.lucene.index.IndexReader in project elasticsearch by elastic.
the class SimpleAllTests method testSimpleAllNoBoost.
public void testSimpleAllNoBoost() throws Exception {
    FieldType allFt = getAllFieldType();
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field("_id", "1", StoredField.TYPE));
    doc.add(new AllField("_all", "something", 1.0f, allFt));
    doc.add(new AllField("_all", "else", 1.0f, allFt));
    indexWriter.addDocument(doc);
    doc = new Document();
    doc.add(new Field("_id", "2", StoredField.TYPE));
    doc.add(new AllField("_all", "else", 1.0f, allFt));
    doc.add(new AllField("_all", "something", 1.0f, allFt));
    indexWriter.addDocument(doc);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    Query query = new AllTermQuery(new Term("_all", "else"));
    TopDocs docs = searcher.search(query, 10);
    assertThat(docs.totalHits, equalTo(2));
    assertThat(docs.scoreDocs[0].doc, equalTo(0));
    assertExplanationScore(searcher, query, docs.scoreDocs[0]);
    assertThat(docs.scoreDocs[1].doc, equalTo(1));
    assertExplanationScore(searcher, query, docs.scoreDocs[1]);
    query = new AllTermQuery(new Term("_all", "something"));
    docs = searcher.search(query, 10);
    assertThat(docs.totalHits, equalTo(2));
    assertThat(docs.scoreDocs[0].doc, equalTo(0));
    assertExplanationScore(searcher, query, docs.scoreDocs[0]);
    assertThat(docs.scoreDocs[1].doc, equalTo(1));
    assertExplanationScore(searcher, query, docs.scoreDocs[1]);
    indexWriter.close();
}
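For comparison, the same round trip can be written with stock Lucene types only. This is a rough, standalone sketch rather than the Elasticsearch test itself: AllField and AllTermQuery are Elasticsearch-internal classes, so a plain TextField and TermQuery stand in for them here, and the "_id"/"_all" field names are kept purely for symmetry with the test above.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;

public class PlainLuceneSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        Document doc = new Document();
        doc.add(new StringField("_id", "1", Field.Store.YES));
        doc.add(new TextField("_all", "something else", Field.Store.NO));
        writer.addDocument(doc);
        // open a near-real-time reader directly from the writer, as the test above does
        IndexReader reader = DirectoryReader.open(writer);
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs docs = searcher.search(new TermQuery(new Term("_all", "else")), 10);
        System.out.println("hits: " + docs.totalHits);
        reader.close();
        writer.close();
        dir.close();
    }
}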
use of org.apache.lucene.index.IndexReader in project OpenGrok by OpenGrok.
the class IndexDatabase method update.
/**
 * Update the content of this index database
 *
 * @throws IOException if an error occurs
 * @throws HistoryException if an error occurs when accessing the history
 */
public void update() throws IOException, HistoryException {
    synchronized (lock) {
        if (running) {
            throw new IOException("Indexer already running!");
        }
        running = true;
        interrupted = false;
    }
    String ctgs = RuntimeEnvironment.getInstance().getCtags();
    if (ctgs != null) {
        ctags = new Ctags();
        ctags.setBinary(ctgs);
    }
    if (ctags == null) {
        LOGGER.severe("Unable to run ctags! searching definitions will not work!");
    }
    if (ctags != null) {
        String filename = RuntimeEnvironment.getInstance().getCTagsExtraOptionsFile();
        if (filename != null) {
            ctags.setCTagsExtraOptionsFile(filename);
        }
    }
    try {
        Analyzer analyzer = AnalyzerGuru.getAnalyzer();
        IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
        iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwc.setRAMBufferSizeMB(RuntimeEnvironment.getInstance().getRamBufferSize());
        writer = new IndexWriter(indexDirectory, iwc);
        // to make sure index exists on the disk
        writer.commit();
        if (directories.isEmpty()) {
            if (project == null) {
                directories.add("");
            } else {
                directories.add(project.getPath());
            }
        }
        for (String dir : directories) {
            File sourceRoot;
            if ("".equals(dir)) {
                sourceRoot = RuntimeEnvironment.getInstance().getSourceRootFile();
            } else {
                sourceRoot = new File(RuntimeEnvironment.getInstance().getSourceRootFile(), dir);
            }
            HistoryGuru.getInstance().ensureHistoryCacheExists(sourceRoot);
            String startuid = Util.path2uid(dir, "");
            // open existing index
            IndexReader reader = DirectoryReader.open(indexDirectory);
            Terms terms = null;
            int numDocs = reader.numDocs();
            if (numDocs > 0) {
                //reader.getTermVectors(0);
                Fields uFields = MultiFields.getFields(reader);
                terms = uFields.terms(QueryBuilder.U);
            }
            try {
                if (numDocs > 0) {
                    uidIter = terms.iterator();
                    //init uid
                    TermsEnum.SeekStatus stat = uidIter.seekCeil(new BytesRef(startuid));
                    if (stat == TermsEnum.SeekStatus.END) {
                        uidIter = null;
                        LOGGER.log(Level.WARNING, "Couldn't find a start term for {0}, empty u field?", startuid);
                    }
                }
                // The code below traverses the tree to get total count.
                int file_cnt = 0;
                if (RuntimeEnvironment.getInstance().isPrintProgress()) {
                    LOGGER.log(Level.INFO, "Counting files in {0} ...", dir);
                    file_cnt = indexDown(sourceRoot, dir, true, 0, 0);
                    LOGGER.log(Level.INFO, "Need to process: {0} files for {1}", new Object[] { file_cnt, dir });
                }
                indexDown(sourceRoot, dir, false, 0, file_cnt);
                while (uidIter != null && uidIter.term() != null && uidIter.term().utf8ToString().startsWith(startuid)) {
                    removeFile();
                    BytesRef next = uidIter.next();
                    if (next == null) {
                        uidIter = null;
                    }
                }
            } finally {
                reader.close();
            }
        }
    } finally {
        if (writer != null) {
            try {
                writer.prepareCommit();
                writer.commit();
                writer.close();
            } catch (IOException e) {
                LOGGER.log(Level.WARNING, "An error occured while closing writer", e);
            }
        }
        if (ctags != null) {
            try {
                ctags.close();
            } catch (IOException e) {
                LOGGER.log(Level.WARNING, "An error occured while closing ctags process", e);
            }
        }
        synchronized (lock) {
            running = false;
        }
    }
    if (!isInterrupted() && isDirty()) {
        if (RuntimeEnvironment.getInstance().isOptimizeDatabase()) {
            optimize();
        }
        RuntimeEnvironment env = RuntimeEnvironment.getInstance();
        File timestamp = new File(env.getDataRootFile(), "timestamp");
        String purpose = "used for timestamping the index database.";
        if (timestamp.exists()) {
            if (!timestamp.setLastModified(System.currentTimeMillis())) {
                LOGGER.log(Level.WARNING, "Failed to set last modified time on ''{0}'', {1}", new Object[] { timestamp.getAbsolutePath(), purpose });
            }
        } else {
            if (!timestamp.createNewFile()) {
                LOGGER.log(Level.WARNING, "Failed to create file ''{0}'', {1}", new Object[] { timestamp.getAbsolutePath(), purpose });
            }
        }
    }
}
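The removal loop in the method above rests on a general Lucene idiom: seek a TermsEnum to the first term at or after a start key with seekCeil, then walk forward while the terms still carry the expected prefix. Below is a standalone sketch of just that idiom, with an assumed index path and start prefix, reusing the "u" field name that QueryBuilder.U refers to.

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

public class UidScanSketch {
    public static void main(String[] args) throws Exception {
        String startuid = "/src/";                                   // assumed start prefix
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("index")))) {
            Terms terms = MultiFields.getTerms(reader, "u");         // "u" is the uid field
            if (terms == null) {
                return;                                              // field not present in this index
            }
            TermsEnum uidIter = terms.iterator();
            // position the enum at the first term >= startuid
            if (uidIter.seekCeil(new BytesRef(startuid)) == TermsEnum.SeekStatus.END) {
                return;                                              // no terms at or after the prefix
            }
            // walk forward while terms still start with the prefix
            for (BytesRef term = uidIter.term();
                 term != null && term.utf8ToString().startsWith(startuid);
                 term = uidIter.next()) {
                System.out.println(term.utf8ToString());
            }
        }
    }
}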
use of org.apache.lucene.index.IndexReader in project neo4j by neo4j.
the class WritableIndexReferenceFactory method refresh.
/**
 * If nothing has changed underneath (since the searcher was last created
 * or refreshed), the given {@code indexReference} is returned. If something has changed,
 * a reference with a refreshed searcher is returned. It makes use of
 * {@link DirectoryReader#openIfChanged(DirectoryReader, IndexWriter, boolean)}, which is
 * faster than opening an index from scratch.
 *
 * @param indexReference the {@link IndexReference} to refresh.
 * @return a refreshed version of the index reference or, if nothing has changed,
 * the given {@code indexReference}.
 * @throws RuntimeException if there's a problem with the index.
 */
@Override
IndexReference refresh(IndexReference indexReference) {
    try {
        DirectoryReader reader = (DirectoryReader) indexReference.getSearcher().getIndexReader();
        IndexWriter writer = indexReference.getWriter();
        IndexReader reopened = DirectoryReader.openIfChanged(reader, writer);
        if (reopened != null) {
            IndexSearcher newSearcher = newIndexSearcher(indexReference.getIdentifier(), reopened);
            indexReference.detachOrClose();
            return new WritableIndexReference(indexReference.getIdentifier(), newSearcher, writer);
        }
        return indexReference;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
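The reopen pattern the javadoc describes can be shown on its own, outside the Neo4j IndexReference wrapper. This is a minimal sketch, assuming only a DirectoryReader and the IndexWriter that modifies the index: openIfChanged returns null when nothing has changed, otherwise a new reader that sees the writer's latest changes.

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;

public final class ReaderRefresh {

    /** Returns a reader that sees the writer's latest changes, reusing {@code current} when possible. */
    static DirectoryReader refreshIfNeeded(DirectoryReader current, IndexWriter writer) throws IOException {
        // openIfChanged returns null if nothing changed since `current` was opened
        DirectoryReader newer = DirectoryReader.openIfChanged(current, writer);
        if (newer == null) {
            return current;     // unchanged: keep using the existing reader
        }
        current.close();        // release the stale reader
        return newer;           // incremental reopen, cheaper than DirectoryReader.open from scratch
    }
}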
use of org.apache.lucene.index.IndexReader in project crate by crate.
the class LuceneOrderedDocCollectorTest method testSearchAfterQueriesNullsLast.
// search after queries
@Test
public void testSearchAfterQueriesNullsLast() throws Exception {
    Directory index = createLuceneIndex();
    IndexReader reader = DirectoryReader.open(index);
    // reverseOrdering = false, nulls First = false
    // 1 2 null null
    //   ^ (lastCollected = 2)
    FieldDoc afterDoc = new FieldDoc(0, 0, new Object[] { 2L });
    Long[] result = nextPageQuery(reader, afterDoc, false, null);
    assertThat(result, is(new Long[] { 2L, null, null }));
    // reverseOrdering = false, nulls First = false
    // 1 2 null null
    //        ^
    afterDoc = new FieldDoc(0, 0, new Object[] { LuceneMissingValue.missingValue(false, null, SortField.Type.LONG) });
    result = nextPageQuery(reader, afterDoc, false, null);
    assertThat(result, is(new Long[] { null, null }));
    // reverseOrdering = true, nulls First = false
    // 2 1 null null
    //   ^
    afterDoc = new FieldDoc(0, 0, new Object[] { 1L });
    result = nextPageQuery(reader, afterDoc, true, false);
    assertThat(result, is(new Long[] { 1L, null, null }));
    // reverseOrdering = true, nulls First = false
    // 2 1 null null
    //        ^
    afterDoc = new FieldDoc(0, 0, new Object[] { LuceneMissingValue.missingValue(true, false, SortField.Type.LONG) });
    result = nextPageQuery(reader, afterDoc, true, false);
    assertThat(result, is(new Long[] { null, null }));
    reader.close();
}
use of org.apache.lucene.index.IndexReader in project crate by crate.
the class LuceneOrderedDocCollectorTest method testSearchAfterQueriesNullsFirst.
@Test
public void testSearchAfterQueriesNullsFirst() throws Exception {
    Directory index = createLuceneIndex();
    IndexReader reader = DirectoryReader.open(index);
    // reverseOrdering = false, nulls First = true
    // null, null, 1, 2
    //                ^ (lastCollected = 2L)
    FieldDoc afterDoc = new FieldDoc(0, 0, new Object[] { 2L });
    Long[] result = nextPageQuery(reader, afterDoc, false, true);
    assertThat(result, is(new Long[] { 2L }));
    // reverseOrdering = false, nulls First = true
    // null, null, 1, 2
    //       ^
    afterDoc = new FieldDoc(0, 0, new Object[] { LuceneMissingValue.missingValue(false, true, SortField.Type.LONG) });
    result = nextPageQuery(reader, afterDoc, false, true);
    assertThat(result, is(new Long[] { null, null, 1L, 2L }));
    // reverseOrdering = true, nulls First = true
    // null, null, 2, 1
    //                ^
    afterDoc = new FieldDoc(0, 0, new Object[] { 1L });
    result = nextPageQuery(reader, afterDoc, true, true);
    assertThat(result, is(new Long[] { 1L }));
    // reverseOrdering = true, nulls First = true
    // null, null, 2, 1
    //       ^
    afterDoc = new FieldDoc(0, 0, new Object[] { LuceneMissingValue.missingValue(true, true, SortField.Type.LONG) });
    result = nextPageQuery(reader, afterDoc, true, true);
    assertThat(result, is(new Long[] { null, null, 2L, 1L }));
    reader.close();
}
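The nextPageQuery helper used by these tests is CrateDB-internal and not shown here. The underlying Lucene mechanism they exercise is IndexSearcher.searchAfter, which resumes a sorted search from the FieldDoc of the last hit on the previous page. Below is a minimal sketch with an assumed numeric "value" field; the null handling that LuceneMissingValue provides is left out.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;

public class SearchAfterSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            for (long v = 1; v <= 4; v++) {
                Document doc = new Document();
                // doc values are required to sort on the field
                doc.add(new NumericDocValuesField("value", v));
                writer.addDocument(doc);
            }
        }
        try (IndexReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
            // first page of two hits, sorted ascending by "value"
            TopDocs page1 = searcher.search(new MatchAllDocsQuery(), 2, sort);
            // resume after the last hit of the previous page
            FieldDoc after = (FieldDoc) page1.scoreDocs[page1.scoreDocs.length - 1];
            TopDocs page2 = searcher.searchAfter(after, new MatchAllDocsQuery(), 2, sort);
            System.out.println("second page hits: " + page2.scoreDocs.length);
        }
    }
}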