
Example 16 with TopDocs

Use of org.apache.lucene.search.TopDocs in project elasticsearch by elastic.

From class InternalEngineTests, method testTranslogReplay.

public void testTranslogReplay() throws IOException {
    final int numDocs = randomIntBetween(1, 10);
    for (int i = 0; i < numDocs; i++) {
        ParsedDocument doc = testParsedDocument(Integer.toString(i), "test", null, testDocument(), new BytesArray("{}"), null);
        Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
        Engine.IndexResult indexResult = engine.index(firstIndexRequest);
        assertThat(indexResult.getVersion(), equalTo(1L));
    }
    engine.refresh("test");
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
        assertThat(topDocs.totalHits, equalTo(numDocs));
    }
    TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
    parser.mappingUpdate = dynamicUpdate();
    engine.close();
    // we need to reuse the engine config, otherwise the parser.mappingUpdate set above won't take effect during recovery
    engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
    engine.recoverFromTranslog();
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
        assertThat(topDocs.totalHits, equalTo(numDocs));
    }
    parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
    assertEquals(numDocs, parser.recoveredOps.get());
    if (parser.mappingUpdate != null) {
        assertEquals(1, parser.getRecoveredTypes().size());
        assertTrue(parser.getRecoveredTypes().containsKey("test"));
    } else {
        assertEquals(0, parser.getRecoveredTypes().size());
    }
    engine.close();
    engine = createEngine(store, primaryTranslogDir);
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
        assertThat(topDocs.totalHits, equalTo(numDocs));
    }
    parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
    assertEquals(0, parser.recoveredOps.get());
    final boolean flush = randomBoolean();
    int randomId = randomIntBetween(numDocs + 1, numDocs + 10);
    ParsedDocument doc = testParsedDocument(Integer.toString(randomId), "test", null, testDocument(), new BytesArray("{}"), null);
    Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
    Engine.IndexResult indexResult = engine.index(firstIndexRequest);
    assertThat(indexResult.getVersion(), equalTo(1L));
    if (flush) {
        engine.flush();
    }
    doc = testParsedDocument(Integer.toString(randomId), "test", null, testDocument(), new BytesArray("{}"), null);
    Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
    Engine.IndexResult result = engine.index(idxRequest);
    engine.refresh("test");
    assertThat(result.getVersion(), equalTo(2L));
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1);
        assertThat(topDocs.totalHits, equalTo(numDocs + 1));
    }
    engine.close();
    engine = createEngine(store, primaryTranslogDir);
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1);
        assertThat(topDocs.totalHits, equalTo(numDocs + 1));
    }
    parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
    assertEquals(flush ? 1 : 2, parser.recoveredOps.get());
    engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc)));
    if (randomBoolean()) {
        engine.refresh("test");
    } else {
        engine.close();
        engine = createEngine(store, primaryTranslogDir);
    }
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs);
        assertThat(topDocs.totalHits, equalTo(numDocs));
    }
}
Also used: Searcher (org.elasticsearch.index.engine.Engine.Searcher), BytesArray (org.elasticsearch.common.bytes.BytesArray), Index (org.elasticsearch.index.Index), MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery), LongPoint (org.apache.lucene.document.LongPoint), TopDocs (org.apache.lucene.search.TopDocs), ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument)
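
The test above repeats the same acquire-search-assert block after every engine reopen. As a sketch only, that repetition could be pulled into a small helper inside the same test class; the helper name assertVisibleDocCount is invented here for illustration, it uses only the calls already visible in the test, and it assumes the Lucene 6.x-era API where TopDocs.totalHits is a plain number:

private void assertVisibleDocCount(Engine engine, int expectedDocs) throws IOException {
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        // ask for at least expectedDocs hits; totalHits still counts every matching document
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), Math.max(1, expectedDocs));
        assertThat(topDocs.totalHits, equalTo(expectedDocs));
    }
}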

Example 17 with TopDocs

Use of org.apache.lucene.search.TopDocs in project elasticsearch by elastic.

From class InternalEngineTests, method testAppendConcurrently.

public void testAppendConcurrently() throws InterruptedException, IOException {
    Thread[] thread = new Thread[randomIntBetween(3, 5)];
    int numDocs = randomIntBetween(1000, 10000);
    assertEquals(0, engine.getNumVersionLookups());
    assertEquals(0, engine.getNumIndexVersionsLookups());
    List<Engine.Index> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        final ParsedDocument doc = testParsedDocument(Integer.toString(i), "test", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
        Engine.Index index = randomAppendOnly(doc, false, i);
        docs.add(index);
    }
    Collections.shuffle(docs, random());
    CountDownLatch startGun = new CountDownLatch(thread.length);
    AtomicInteger offset = new AtomicInteger(-1);
    for (int i = 0; i < thread.length; i++) {
        thread[i] = new Thread() {

            @Override
            public void run() {
                startGun.countDown();
                try {
                    startGun.await();
                } catch (InterruptedException e) {
                    throw new AssertionError(e);
                }
                int docOffset;
                while ((docOffset = offset.incrementAndGet()) < docs.size()) {
                    try {
                        engine.index(docs.get(docOffset));
                    } catch (IOException e) {
                        throw new AssertionError(e);
                    }
                }
            }
        };
        thread[i].start();
    }
    for (int i = 0; i < thread.length; i++) {
        thread[i].join();
    }
    engine.refresh("test");
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
        assertEquals(docs.size(), topDocs.totalHits);
    }
    assertEquals(0, engine.getNumVersionLookups());
    assertEquals(0, engine.getNumIndexVersionsLookups());
    assertFalse(engine.indexWriterHasDeletions());
}
Also used: Searcher (org.elasticsearch.index.engine.Engine.Searcher), BytesArray (org.elasticsearch.common.bytes.BytesArray), ArrayList (java.util.ArrayList), Index (org.elasticsearch.index.Index), UncheckedIOException (java.io.UncheckedIOException), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery), LongPoint (org.apache.lucene.document.LongPoint), TopDocs (org.apache.lucene.search.TopDocs), ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument), AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
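
The interesting part of testAppendConcurrently is the coordination rather than the Lucene calls: every worker counts the latch down and then awaits it, so all threads begin indexing at the same moment, and a shared AtomicInteger hands out document offsets so each document is indexed by exactly one thread. Below is a minimal standalone sketch of that start-gate and work-claiming pattern using only the JDK, with a plain Runnable standing in for engine.index:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

final class StartGateWorkers {

    // Runs all tasks across threadCount threads; each task is claimed exactly once.
    static void runConcurrently(List<Runnable> tasks, int threadCount) throws InterruptedException {
        CountDownLatch startGun = new CountDownLatch(threadCount);
        AtomicInteger offset = new AtomicInteger(-1);
        Thread[] threads = new Thread[threadCount];
        for (int i = 0; i < threadCount; i++) {
            threads[i] = new Thread(() -> {
                startGun.countDown();
                try {
                    // start gate: no thread proceeds until every thread has arrived
                    startGun.await();
                } catch (InterruptedException e) {
                    throw new AssertionError(e);
                }
                int next;
                while ((next = offset.incrementAndGet()) < tasks.size()) {
                    // incrementAndGet yields a unique index, so no task runs twice
                    tasks.get(next).run();
                }
            });
            threads[i].start();
        }
        for (Thread thread : threads) {
            thread.join();
        }
    }
}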

Example 18 with TopDocs

Use of org.apache.lucene.search.TopDocs in project elasticsearch by elastic.

From class InternalEngineTests, method testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs.

public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() throws IOException {
    final ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
    boolean isRetry = true;
    long autoGeneratedIdTimestamp = 0;
    Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
    Engine.IndexResult result = engine.index(firstIndexRequest);
    assertThat(result.getVersion(), equalTo(1L));
    Engine.Index firstIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), firstIndexRequest.primaryTerm(), result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
    Engine.IndexResult indexReplicaResult = replicaEngine.index(firstIndexRequestReplica);
    assertThat(indexReplicaResult.getVersion(), equalTo(1L));
    isRetry = false;
    Engine.Index secondIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
    Engine.IndexResult indexResult = engine.index(secondIndexRequest);
    assertTrue(indexResult.isCreated());
    engine.refresh("test");
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
        assertEquals(1, topDocs.totalHits);
    }
    Engine.Index secondIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), secondIndexRequest.primaryTerm(), result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
    replicaEngine.index(secondIndexRequestReplica);
    replicaEngine.refresh("test");
    try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) {
        TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
        assertEquals(1, topDocs.totalHits);
    }
}
Also used: Searcher (org.elasticsearch.index.engine.Engine.Searcher), TopDocs (org.apache.lucene.search.TopDocs), BytesArray (org.elasticsearch.common.bytes.BytesArray), ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument), Index (org.elasticsearch.index.Index), MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery)

Example 19 with TopDocs

Use of org.apache.lucene.search.TopDocs in project elasticsearch by elastic.

From class DoubleIndexingDocTests, method testDoubleIndexingSameDoc.

public void testDoubleIndexingSameDoc() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), Lucene.STANDARD_ANALYZER));
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").endObject().endObject().endObject().string();
    IndexService index = createIndex("test");
    client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping, XContentType.JSON).get();
    DocumentMapper mapper = index.mapperService().documentMapper("type");
    QueryShardContext context = index.newQueryShardContext(0, null, () -> 0L);
    ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject().field("field1", "value1").field("field2", 1).field("field3", 1.1).field("field4", "2010-01-01").startArray("field5").value(1).value(2).value(3).endArray().endObject().bytes());
    assertNotNull(doc.dynamicMappingsUpdate());
    client().admin().indices().preparePutMapping("test").setType("type").setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get();
    mapper = index.mapperService().documentMapper("type");
    writer.addDocument(doc.rootDoc());
    writer.addDocument(doc.rootDoc());
    IndexReader reader = DirectoryReader.open(writer);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").fieldType().termQuery("1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").fieldType().termQuery("1.1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").fieldType().termQuery("2010-01-01", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("1", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("2", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("3", context), 10);
    assertThat(topDocs.totalHits, equalTo(2));
    writer.close();
    reader.close();
    dir.close();
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TopDocs (org.apache.lucene.search.TopDocs), ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument), IndexWriter (org.apache.lucene.index.IndexWriter), IndexService (org.elasticsearch.index.IndexService), DocumentMapper (org.elasticsearch.index.mapper.DocumentMapper), IndexReader (org.apache.lucene.index.IndexReader), QueryShardContext (org.elasticsearch.index.query.QueryShardContext), Directory (org.apache.lucene.store.Directory)
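
Stripped of the Elasticsearch mapping machinery, the core of testDoubleIndexingSameDoc is plain Lucene: add the identical document twice, then verify that a term query on an indexed field matches both copies. The following is a minimal hedged sketch of that idea written against the Lucene 6.x-era API these examples use (RAMDirectory and the numeric totalHits field belong to that version; later Lucene releases replace them):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DoubleIndexingSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new RAMDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new StringField("field1", "value1", Field.Store.NO));
            // index the very same document twice, as the test does with doc.rootDoc()
            writer.addDocument(doc);
            writer.addDocument(doc);
            try (IndexReader reader = DirectoryReader.open(writer)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                TopDocs topDocs = searcher.search(new TermQuery(new Term("field1", "value1")), 10);
                System.out.println("hits: " + topDocs.totalHits); // expected: 2
            }
        }
    }
}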

Example 20 with TopDocs

Use of org.apache.lucene.search.TopDocs in project elasticsearch by elastic.

From class FunctionScoreTests, method testSimpleWeightedFunction.

public void testSimpleWeightedFunction() throws IOException, ExecutionException, InterruptedException {
    int numFunctions = randomIntBetween(1, 3);
    float[] weights = randomFloats(numFunctions);
    double[] scores = randomDoubles(numFunctions);
    ScoreFunctionStub[] scoreFunctionStubs = new ScoreFunctionStub[numFunctions];
    for (int i = 0; i < numFunctions; i++) {
        scoreFunctionStubs[i] = new ScoreFunctionStub(scores[i]);
    }
    WeightFactorFunction[] weightFunctionStubs = new WeightFactorFunction[numFunctions];
    for (int i = 0; i < numFunctions; i++) {
        weightFunctionStubs[i] = new WeightFactorFunction(weights[i], scoreFunctionStubs[i]);
    }
    FiltersFunctionScoreQuery filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery(FiltersFunctionScoreQuery.ScoreMode.MULTIPLY, CombineFunction.REPLACE, weightFunctionStubs);
    TopDocs topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1);
    float scoreWithWeight = topDocsWithWeights.scoreDocs[0].score;
    double score = 1;
    for (int i = 0; i < weights.length; i++) {
        score *= weights[i] * scores[i];
    }
    assertThat(scoreWithWeight / (float) score, is(1f));
    float explainedScore = getExplanation(searcher, filtersFunctionScoreQueryWithWeights).getValue();
    assertThat(explainedScore / scoreWithWeight, is(1f));
    filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery(FiltersFunctionScoreQuery.ScoreMode.SUM, CombineFunction.REPLACE, weightFunctionStubs);
    topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1);
    scoreWithWeight = topDocsWithWeights.scoreDocs[0].score;
    double sum = 0;
    for (int i = 0; i < weights.length; i++) {
        sum += weights[i] * scores[i];
    }
    assertThat(scoreWithWeight / (float) sum, is(1f));
    explainedScore = getExplanation(searcher, filtersFunctionScoreQueryWithWeights).getValue();
    assertThat(explainedScore / scoreWithWeight, is(1f));
    filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery(FiltersFunctionScoreQuery.ScoreMode.AVG, CombineFunction.REPLACE, weightFunctionStubs);
    topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1);
    scoreWithWeight = topDocsWithWeights.scoreDocs[0].score;
    double norm = 0;
    sum = 0;
    for (int i = 0; i < weights.length; i++) {
        norm += weights[i];
        sum += weights[i] * scores[i];
    }
    assertThat(scoreWithWeight / (float) (sum / norm), is(1f));
    explainedScore = getExplanation(searcher, filtersFunctionScoreQueryWithWeights).getValue();
    assertThat(explainedScore / scoreWithWeight, is(1f));
    filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery(FiltersFunctionScoreQuery.ScoreMode.MIN, CombineFunction.REPLACE, weightFunctionStubs);
    topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1);
    scoreWithWeight = topDocsWithWeights.scoreDocs[0].score;
    double min = Double.POSITIVE_INFINITY;
    for (int i = 0; i < weights.length; i++) {
        min = Math.min(min, weights[i] * scores[i]);
    }
    assertThat(scoreWithWeight / (float) min, is(1f));
    explainedScore = getExplanation(searcher, filtersFunctionScoreQueryWithWeights).getValue();
    assertThat(explainedScore / scoreWithWeight, is(1f));
    filtersFunctionScoreQueryWithWeights = getFiltersFunctionScoreQuery(FiltersFunctionScoreQuery.ScoreMode.MAX, CombineFunction.REPLACE, weightFunctionStubs);
    topDocsWithWeights = searcher.search(filtersFunctionScoreQueryWithWeights, 1);
    scoreWithWeight = topDocsWithWeights.scoreDocs[0].score;
    double max = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < weights.length; i++) {
        max = Math.max(max, weights[i] * scores[i]);
    }
    assertThat(scoreWithWeight / (float) max, is(1f));
    explainedScore = getExplanation(searcher, filtersFunctionScoreQueryWithWeights).getValue();
    assertThat(explainedScore / scoreWithWeight, is(1f));
}
Also used: TopDocs (org.apache.lucene.search.TopDocs), WeightFactorFunction (org.elasticsearch.common.lucene.search.function.WeightFactorFunction), FiltersFunctionScoreQuery (org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery)
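
Each block in testSimpleWeightedFunction re-derives the expected score by hand: every function contributes weights[i] * scores[i], and the score mode decides how those contributions are combined. As a sketch, here is the same reference arithmetic collected in one place (the Mode enum below is a stand-in for illustration, not the real FiltersFunctionScoreQuery.ScoreMode):

enum Mode { MULTIPLY, SUM, AVG, MIN, MAX }

static double expectedCombinedScore(Mode mode, float[] weights, double[] scores) {
    double product = 1, sum = 0, weightSum = 0;
    double min = Double.POSITIVE_INFINITY, max = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < weights.length; i++) {
        double weighted = weights[i] * scores[i];
        product *= weighted;
        sum += weighted;
        weightSum += weights[i];
        min = Math.min(min, weighted);
        max = Math.max(max, weighted);
    }
    switch (mode) {
        case MULTIPLY: return product;
        case SUM:      return sum;
        case AVG:      return sum / weightSum; // weighted average, normalized by the sum of weights
        case MIN:      return min;
        case MAX:      return max;
        default:       throw new AssertionError(mode);
    }
}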

Aggregations

Classes most often used together with TopDocs, with occurrence counts:

TopDocs (org.apache.lucene.search.TopDocs): 486
IndexSearcher (org.apache.lucene.search.IndexSearcher): 295
Document (org.apache.lucene.document.Document): 270
IndexReader (org.apache.lucene.index.IndexReader): 186
TermQuery (org.apache.lucene.search.TermQuery): 184
Directory (org.apache.lucene.store.Directory): 173
Term (org.apache.lucene.index.Term): 172
Query (org.apache.lucene.search.Query): 163
RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter): 144
BooleanQuery (org.apache.lucene.search.BooleanQuery): 125
ScoreDoc (org.apache.lucene.search.ScoreDoc): 122
MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery): 118
Sort (org.apache.lucene.search.Sort): 94
Field (org.apache.lucene.document.Field): 85
SortField (org.apache.lucene.search.SortField): 74
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 56
IOException (java.io.IOException): 53
TextField (org.apache.lucene.document.TextField): 47
PhraseQuery (org.apache.lucene.search.PhraseQuery): 46
PrefixQuery (org.apache.lucene.search.PrefixQuery): 45
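
Most of these co-occurring classes appear in one small pattern: open an IndexSearcher over an IndexReader, run a Query, and walk the ScoreDoc array of the returned TopDocs to load the matching Documents. A minimal hedged sketch of that pattern, again against the Lucene 6.x-era API used throughout these examples (RAMDirectory and the numeric totalHits field are from that version):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class TopDocsWalkthrough {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new RAMDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            for (int i = 0; i < 3; i++) {
                Document doc = new Document();
                doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
                doc.add(new TextField("body", "lucene top docs example", Field.Store.NO));
                writer.addDocument(doc);
            }
            try (IndexReader reader = DirectoryReader.open(writer)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                Query query = new TermQuery(new Term("body", "lucene"));
                // ask for the ten best-scoring hits; totalHits still counts every match
                TopDocs topDocs = searcher.search(query, 10);
                System.out.println("total hits: " + topDocs.totalHits);
                for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                    Document hit = searcher.doc(scoreDoc.doc); // load the stored fields of the hit
                    System.out.println(hit.get("id") + " score=" + scoreDoc.score);
                }
            }
        }
    }
}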