Search in sources:

Example 41 with StringField

Use of org.apache.lucene.document.StringField in the lucene-solr project by Apache.

From the class TestEmptyTokenStream, method testIndexWriter_LUCENE4656:

// LUCENE-4656: a TokenStream that exposes no TermToBytesRefAttribute must be
// indexable without error; the field simply contributes no terms.
public void testIndexWriter_LUCENE4656() throws IOException {
    Directory directory = newDirectory();
    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(null));

    TokenStream emptyStream = new EmptyTokenStream();
    assertFalse(emptyStream.hasAttribute(TermToBytesRefAttribute.class));

    Document document = new Document();
    document.add(new StringField("id", "0", Field.Store.YES));
    document.add(new TextField("description", emptyStream));

    // this should not fail because we have no TermToBytesRefAttribute
    writer.addDocument(document);
    assertEquals(1, writer.numDocs());

    writer.close();
    directory.close();
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) IndexWriter(org.apache.lucene.index.IndexWriter) TermToBytesRefAttribute(org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute) StringField(org.apache.lucene.document.StringField) TextField(org.apache.lucene.document.TextField) Document(org.apache.lucene.document.Document) Directory(org.apache.lucene.store.Directory)

Example 42 with StringField

Use of org.apache.lucene.document.StringField in the lucene-solr project by Apache.

From the class TestLucene54DocValuesFormat, method doTestTermsEnumRandom:

// TODO: try to refactor this and some termsenum tests into the base class.
// to do this we need to fix the test class to get a DVF not a Codec so we can setup
// the postings format correctly.
/**
 * Duels a postings-based TermsEnum against the Lucene54 SortedSet doc-values
 * TermsEnum over the same random string values, comparing per-segment and
 * again after a forceMerge(1).
 *
 * @param numDocs   number of documents to index; assumed &gt;= 10 because a random
 *                  count in [0, numDocs/10) of them is deleted afterwards
 * @param minLength minimum length of each random value
 * @param maxLength maximum length of each random value
 */
private void doTestTermsEnumRandom(int numDocs, int minLength, int maxLength) throws Exception {
    Directory dir = newFSDirectory(createTempDir());
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    conf.setMergeScheduler(new SerialMergeScheduler());
    // set to duel against a codec which has ordinals:
    final PostingsFormat pf = TestUtil.getPostingsFormatWithOrds(random());
    final DocValuesFormat dv = new Lucene54DocValuesFormat();
    conf.setCodec(new AssertingCodec() {

        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            return pf;
        }

        @Override
        public DocValuesFormat getDocValuesFormatForField(String field) {
            return dv;
        }
    });
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
    // index some docs
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
        doc.add(idField);
        final int length = TestUtil.nextInt(random(), minLength, maxLength);
        int numValues = random().nextInt(17);
        // create a random list of strings
        List<String> values = new ArrayList<>();
        for (int v = 0; v < numValues; v++) {
            values.add(TestUtil.randomSimpleString(random(), minLength, length));
        }
        // add in a shuffled order to the indexed field. BUGFIX: the shuffled
        // copy was previously built but the loop iterated 'values', leaving the
        // shuffle dead code; iterate 'unordered' as the comment intends.
        ArrayList<String> unordered = new ArrayList<>(values);
        Collections.shuffle(unordered, random());
        for (String v : unordered) {
            doc.add(newStringField("indexed", v, Field.Store.NO));
        }
        // add in a (differently) shuffled order to the dv field
        ArrayList<String> unordered2 = new ArrayList<>(values);
        Collections.shuffle(unordered2, random());
        for (String v : unordered2) {
            doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
        }
        writer.addDocument(doc);
        if (random().nextInt(31) == 0) {
            writer.commit();
        }
    }
    // delete some docs (note: nextInt(0) throws, hence the numDocs >= 10 assumption)
    int numDeletions = random().nextInt(numDocs / 10);
    for (int i = 0; i < numDeletions; i++) {
        int id = random().nextInt(numDocs);
        writer.deleteDocuments(new Term("id", Integer.toString(id)));
    }
    // compare per-segment
    DirectoryReader ir = writer.getReader();
    for (LeafReaderContext context : ir.leaves()) {
        LeafReader r = context.reader();
        Terms terms = r.terms("indexed");
        if (terms != null) {
            SortedSetDocValues ssdv = r.getSortedSetDocValues("dv");
            assertEquals(terms.size(), ssdv.getValueCount());
            TermsEnum expected = terms.iterator();
            // reuse ssdv instead of looking the dv field up a second time
            TermsEnum actual = ssdv.termsEnum();
            assertEquals(terms.size(), expected, actual);
            doTestSortedSetEnumAdvanceIndependently(ssdv);
        }
    }
    ir.close();
    writer.forceMerge(1);
    // now compare again after the merge
    ir = writer.getReader();
    LeafReader ar = getOnlyLeafReader(ir);
    Terms terms = ar.terms("indexed");
    if (terms != null) {
        SortedSetDocValues ssdv = ar.getSortedSetDocValues("dv");
        assertEquals(terms.size(), ssdv.getValueCount());
        TermsEnum expected = terms.iterator();
        TermsEnum actual = ssdv.termsEnum();
        assertEquals(terms.size(), expected, actual);
    }
    ir.close();
    writer.close();
    dir.close();
}
Also used : ArrayList(java.util.ArrayList) Document(org.apache.lucene.document.Document) DocValuesFormat(org.apache.lucene.codecs.DocValuesFormat) TermsEnum(org.apache.lucene.index.TermsEnum) SerialMergeScheduler(org.apache.lucene.index.SerialMergeScheduler) IndexableField(org.apache.lucene.index.IndexableField) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) StoredField(org.apache.lucene.document.StoredField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) SortedSetDocValuesField(org.apache.lucene.document.SortedSetDocValuesField) BinaryDocValuesField(org.apache.lucene.document.BinaryDocValuesField) SortedDocValuesField(org.apache.lucene.document.SortedDocValuesField) StringField(org.apache.lucene.document.StringField) Field(org.apache.lucene.document.Field) MockAnalyzer(org.apache.lucene.analysis.MockAnalyzer) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) BytesRef(org.apache.lucene.util.BytesRef) Directory(org.apache.lucene.store.Directory) AssertingCodec(org.apache.lucene.codecs.asserting.AssertingCodec) LeafReader(org.apache.lucene.index.LeafReader) DirectoryReader(org.apache.lucene.index.DirectoryReader) Terms(org.apache.lucene.index.Terms) Term(org.apache.lucene.index.Term) SortedSetDocValues(org.apache.lucene.index.SortedSetDocValues) PostingsFormat(org.apache.lucene.codecs.PostingsFormat) StringField(org.apache.lucene.document.StringField) SortedSetDocValuesField(org.apache.lucene.document.SortedSetDocValuesField) RandomIndexWriter(org.apache.lucene.index.RandomIndexWriter) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Example 43 with StringField

Use of org.apache.lucene.document.StringField in the lucene-solr project by Apache.

From the class TestBackwardsCompatibility, method testCreateIndexWithDocValuesUpdates:

// Creates an index with DocValues updates
public void testCreateIndexWithDocValuesUpdates() throws Exception {
    Path indexDir = getIndexDir().resolve("dvupdates");
    Files.deleteIfExists(indexDir);
    Directory dir = newFSDirectory(indexDir);
    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()))
        .setUseCompoundFile(false)
        .setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter w = new IndexWriter(dir, iwc);
    // create an index w/ few doc-values fields, some with updates and some without
    for (int i = 0; i < 30; i++) {
        Document document = new Document();
        document.add(new StringField("id", "" + i, Field.Store.NO));
        document.add(new NumericDocValuesField("ndv1", i));
        document.add(new NumericDocValuesField("ndv1_c", i * 2));
        document.add(new NumericDocValuesField("ndv2", i * 3));
        document.add(new NumericDocValuesField("ndv2_c", i * 6));
        document.add(new BinaryDocValuesField("bdv1", toBytes(i)));
        document.add(new BinaryDocValuesField("bdv1_c", toBytes(i * 2)));
        document.add(new BinaryDocValuesField("bdv2", toBytes(i * 3)));
        document.add(new BinaryDocValuesField("bdv2_c", toBytes(i * 6)));
        w.addDocument(document);
        // flush every 10 docs so the index ends up with multiple segments
        if ((i + 1) % 10 == 0) {
            w.commit();
        }
    }
    // first segment: no updates
    // second segment: update two fields, same gen
    updateNumeric(w, "10", "ndv1", "ndv1_c", 100L);
    updateBinary(w, "11", "bdv1", "bdv1_c", 100L);
    w.commit();
    // third segment: update few fields, different gens, few docs
    updateNumeric(w, "20", "ndv1", "ndv1_c", 100L);
    updateBinary(w, "21", "bdv1", "bdv1_c", 100L);
    w.commit();
    // update the field again
    updateNumeric(w, "22", "ndv1", "ndv1_c", 200L);
    w.commit();
    w.close();
    dir.close();
}
Also used : Path(java.nio.file.Path) MockAnalyzer(org.apache.lucene.analysis.MockAnalyzer) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) StringField(org.apache.lucene.document.StringField) Document(org.apache.lucene.document.Document) BinaryDocValuesField(org.apache.lucene.document.BinaryDocValuesField) BinaryPoint(org.apache.lucene.document.BinaryPoint) DoublePoint(org.apache.lucene.document.DoublePoint) LongPoint(org.apache.lucene.document.LongPoint) IntPoint(org.apache.lucene.document.IntPoint) FloatPoint(org.apache.lucene.document.FloatPoint) Directory(org.apache.lucene.store.Directory) RAMDirectory(org.apache.lucene.store.RAMDirectory) FSDirectory(org.apache.lucene.store.FSDirectory) SimpleFSDirectory(org.apache.lucene.store.SimpleFSDirectory) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory)

Example 44 with StringField

Use of org.apache.lucene.document.StringField in the lucene-solr project by Apache.

From the class TestBackwardsCompatibility, method addDoc:

/**
 * Adds one document that exercises a broad mix of field types — stored text,
 * term vectors, every doc-values flavor, 1d/2d points, and non-ASCII field
 * names/values — so that back-compat indexes cover many codec features.
 *
 * @param writer writer to add the document to
 * @param id     document id; also used to derive the numeric/binary field values
 * @throws IOException if the writer fails to add the document
 */
private void addDoc(IndexWriter writer, int id) throws IOException {
    Document doc = new Document();
    doc.add(new TextField("content", "aaa", Field.Store.NO));
    doc.add(new StringField("id", Integer.toString(id), Field.Store.YES));
    // stored, tokenized field with full term vectors (positions + offsets)
    FieldType customType2 = new FieldType(TextField.TYPE_STORED);
    customType2.setStoreTermVectors(true);
    customType2.setStoreTermVectorPositions(true);
    customType2.setStoreTermVectorOffsets(true);
    // values deliberately contain supplementary (surrogate-pair) characters
    doc.add(new Field("autf8", "Luš„žceš… ne  ā˜  abń•°—cd", customType2));
    doc.add(new Field("utf8", "Luš„žceš… ne  ā˜  abń•°—cd", customType2));
    doc.add(new Field("content2", "here is more content with aaa aaa aaa", customType2));
    // field whose NAME is non-ASCII
    doc.add(new Field("fieā±·ld", "field with non-ascii name", customType2));
    // add docvalues fields
    doc.add(new NumericDocValuesField("dvByte", (byte) id));
    // big-endian 4-byte encoding of id, shared by the binary/sorted dv fields below
    byte[] bytes = new byte[] { (byte) (id >>> 24), (byte) (id >>> 16), (byte) (id >>> 8), (byte) id };
    BytesRef ref = new BytesRef(bytes);
    doc.add(new BinaryDocValuesField("dvBytesDerefFixed", ref));
    doc.add(new BinaryDocValuesField("dvBytesDerefVar", ref));
    doc.add(new SortedDocValuesField("dvBytesSortedFixed", ref));
    doc.add(new SortedDocValuesField("dvBytesSortedVar", ref));
    doc.add(new BinaryDocValuesField("dvBytesStraightFixed", ref));
    doc.add(new BinaryDocValuesField("dvBytesStraightVar", ref));
    doc.add(new DoubleDocValuesField("dvDouble", (double) id));
    doc.add(new FloatDocValuesField("dvFloat", (float) id));
    doc.add(new NumericDocValuesField("dvInt", id));
    doc.add(new NumericDocValuesField("dvLong", id));
    doc.add(new NumericDocValuesField("dvPacked", id));
    doc.add(new NumericDocValuesField("dvShort", (short) id));
    doc.add(new SortedSetDocValuesField("dvSortedSet", ref));
    doc.add(new SortedNumericDocValuesField("dvSortedNumeric", id));
    // points of every numeric type, in 1 and 2 dimensions
    doc.add(new IntPoint("intPoint1d", id));
    doc.add(new IntPoint("intPoint2d", id, 2 * id));
    doc.add(new FloatPoint("floatPoint1d", (float) id));
    doc.add(new FloatPoint("floatPoint2d", (float) id, (float) 2 * id));
    doc.add(new LongPoint("longPoint1d", id));
    doc.add(new LongPoint("longPoint2d", id, 2 * id));
    doc.add(new DoublePoint("doublePoint1d", (double) id));
    doc.add(new DoublePoint("doublePoint2d", (double) id, (double) 2 * id));
    doc.add(new BinaryPoint("binaryPoint1d", bytes));
    doc.add(new BinaryPoint("binaryPoint2d", bytes, bytes));
    // a field with both offsets and term vectors for a cross-check
    FieldType customType3 = new FieldType(TextField.TYPE_STORED);
    customType3.setStoreTermVectors(true);
    customType3.setStoreTermVectorPositions(true);
    customType3.setStoreTermVectorOffsets(true);
    customType3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    doc.add(new Field("content5", "here is more content with aaa aaa aaa", customType3));
    // a field that omits only positions
    FieldType customType4 = new FieldType(TextField.TYPE_STORED);
    customType4.setStoreTermVectors(true);
    customType4.setStoreTermVectorPositions(false);
    customType4.setStoreTermVectorOffsets(true);
    customType4.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
    doc.add(new Field("content6", "here is more content with aaa aaa aaa", customType4));
    // TODO: 
    //   index different norms types via similarity (we use a random one currently?!)
    //   remove any analyzer randomness, explicitly add payloads for certain fields.
    writer.addDocument(doc);
}
Also used : BinaryPoint(org.apache.lucene.document.BinaryPoint) FloatDocValuesField(org.apache.lucene.document.FloatDocValuesField) LongPoint(org.apache.lucene.document.LongPoint) Document(org.apache.lucene.document.Document) BinaryDocValuesField(org.apache.lucene.document.BinaryDocValuesField) FieldType(org.apache.lucene.document.FieldType) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) SortField(org.apache.lucene.search.SortField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) SortedSetDocValuesField(org.apache.lucene.document.SortedSetDocValuesField) BinaryDocValuesField(org.apache.lucene.document.BinaryDocValuesField) SortedDocValuesField(org.apache.lucene.document.SortedDocValuesField) StringField(org.apache.lucene.document.StringField) DoubleDocValuesField(org.apache.lucene.document.DoubleDocValuesField) FloatDocValuesField(org.apache.lucene.document.FloatDocValuesField) Field(org.apache.lucene.document.Field) TextField(org.apache.lucene.document.TextField) IntPoint(org.apache.lucene.document.IntPoint) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) FloatPoint(org.apache.lucene.document.FloatPoint) StringField(org.apache.lucene.document.StringField) DoubleDocValuesField(org.apache.lucene.document.DoubleDocValuesField) SortedDocValuesField(org.apache.lucene.document.SortedDocValuesField) DoublePoint(org.apache.lucene.document.DoublePoint) TextField(org.apache.lucene.document.TextField) SortedSetDocValuesField(org.apache.lucene.document.SortedSetDocValuesField) BytesRef(org.apache.lucene.util.BytesRef)

Example 45 with StringField

Use of org.apache.lucene.document.StringField in the lucene-solr project by Apache.

From the class TestLucene70DocValuesFormat, method doTestTermsEnumRandom:

// TODO: try to refactor this and some termsenum tests into the base class.
// to do this we need to fix the test class to get a DVF not a Codec so we can setup
// the postings format correctly.
/**
 * Duels a postings-based TermsEnum against the Lucene70 SortedSet doc-values
 * TermsEnum over the same random string values, comparing per-segment and
 * again after a forceMerge(1).
 *
 * @param numDocs        number of documents to index; assumed &gt;= 10 because a
 *                       random count in [0, numDocs/10) of them is deleted afterwards
 * @param valuesProducer supplies each random string value
 */
private void doTestTermsEnumRandom(int numDocs, Supplier<String> valuesProducer) throws Exception {
    Directory dir = newFSDirectory(createTempDir());
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    conf.setMergeScheduler(new SerialMergeScheduler());
    // set to duel against a codec which has ordinals:
    final PostingsFormat pf = TestUtil.getPostingsFormatWithOrds(random());
    final DocValuesFormat dv = new Lucene70DocValuesFormat();
    conf.setCodec(new AssertingCodec() {

        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            return pf;
        }

        @Override
        public DocValuesFormat getDocValuesFormatForField(String field) {
            return dv;
        }
    });
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
    // index some docs
    for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
        doc.add(idField);
        int numValues = random().nextInt(17);
        // create a random list of strings
        List<String> values = new ArrayList<>();
        for (int v = 0; v < numValues; v++) {
            values.add(valuesProducer.get());
        }
        // add in a shuffled order to the indexed field. BUGFIX: the shuffled
        // copy was previously built but the loop iterated 'values', leaving the
        // shuffle dead code; iterate 'unordered' as the comment intends.
        ArrayList<String> unordered = new ArrayList<>(values);
        Collections.shuffle(unordered, random());
        for (String v : unordered) {
            doc.add(newStringField("indexed", v, Field.Store.NO));
        }
        // add in a (differently) shuffled order to the dv field
        ArrayList<String> unordered2 = new ArrayList<>(values);
        Collections.shuffle(unordered2, random());
        for (String v : unordered2) {
            doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
        }
        writer.addDocument(doc);
        if (random().nextInt(31) == 0) {
            writer.commit();
        }
    }
    // delete some docs (note: nextInt(0) throws, hence the numDocs >= 10 assumption)
    int numDeletions = random().nextInt(numDocs / 10);
    for (int i = 0; i < numDeletions; i++) {
        int id = random().nextInt(numDocs);
        writer.deleteDocuments(new Term("id", Integer.toString(id)));
    }
    // compare per-segment
    DirectoryReader ir = writer.getReader();
    for (LeafReaderContext context : ir.leaves()) {
        LeafReader r = context.reader();
        Terms terms = r.terms("indexed");
        if (terms != null) {
            SortedSetDocValues ssdv = r.getSortedSetDocValues("dv");
            assertEquals(terms.size(), ssdv.getValueCount());
            TermsEnum expected = terms.iterator();
            // reuse ssdv instead of looking the dv field up a second time
            TermsEnum actual = ssdv.termsEnum();
            assertEquals(terms.size(), expected, actual);
            doTestSortedSetEnumAdvanceIndependently(ssdv);
        }
    }
    ir.close();
    writer.forceMerge(1);
    // now compare again after the merge
    ir = writer.getReader();
    LeafReader ar = getOnlyLeafReader(ir);
    Terms terms = ar.terms("indexed");
    if (terms != null) {
        SortedSetDocValues ssdv = ar.getSortedSetDocValues("dv");
        assertEquals(terms.size(), ssdv.getValueCount());
        TermsEnum expected = terms.iterator();
        TermsEnum actual = ssdv.termsEnum();
        assertEquals(terms.size(), expected, actual);
    }
    ir.close();
    writer.close();
    dir.close();
}
Also used : Lucene70DocValuesFormat(org.apache.lucene.codecs.lucene70.Lucene70DocValuesFormat) ArrayList(java.util.ArrayList) Document(org.apache.lucene.document.Document) Lucene70DocValuesFormat(org.apache.lucene.codecs.lucene70.Lucene70DocValuesFormat) DocValuesFormat(org.apache.lucene.codecs.DocValuesFormat) TermsEnum(org.apache.lucene.index.TermsEnum) SerialMergeScheduler(org.apache.lucene.index.SerialMergeScheduler) IndexableField(org.apache.lucene.index.IndexableField) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) StoredField(org.apache.lucene.document.StoredField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) SortedSetDocValuesField(org.apache.lucene.document.SortedSetDocValuesField) BinaryDocValuesField(org.apache.lucene.document.BinaryDocValuesField) SortedDocValuesField(org.apache.lucene.document.SortedDocValuesField) StringField(org.apache.lucene.document.StringField) Field(org.apache.lucene.document.Field) MockAnalyzer(org.apache.lucene.analysis.MockAnalyzer) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) BytesRef(org.apache.lucene.util.BytesRef) Directory(org.apache.lucene.store.Directory) AssertingCodec(org.apache.lucene.codecs.asserting.AssertingCodec) LeafReader(org.apache.lucene.index.LeafReader) DirectoryReader(org.apache.lucene.index.DirectoryReader) Terms(org.apache.lucene.index.Terms) Term(org.apache.lucene.index.Term) SortedSetDocValues(org.apache.lucene.index.SortedSetDocValues) PostingsFormat(org.apache.lucene.codecs.PostingsFormat) StringField(org.apache.lucene.document.StringField) SortedSetDocValuesField(org.apache.lucene.document.SortedSetDocValuesField) RandomIndexWriter(org.apache.lucene.index.RandomIndexWriter) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Aggregations

StringField (org.apache.lucene.document.StringField)323 Document (org.apache.lucene.document.Document)302 Directory (org.apache.lucene.store.Directory)227 MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer)129 NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField)94 Term (org.apache.lucene.index.Term)90 RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter)82 BytesRef (org.apache.lucene.util.BytesRef)73 IndexSearcher (org.apache.lucene.search.IndexSearcher)57 DirectoryReader (org.apache.lucene.index.DirectoryReader)56 BinaryDocValuesField (org.apache.lucene.document.BinaryDocValuesField)55 ArrayList (java.util.ArrayList)54 TextField (org.apache.lucene.document.TextField)54 IndexReader (org.apache.lucene.index.IndexReader)51 Field (org.apache.lucene.document.Field)50 TermQuery (org.apache.lucene.search.TermQuery)50 IndexWriter (org.apache.lucene.index.IndexWriter)45 SortedNumericDocValuesField (org.apache.lucene.document.SortedNumericDocValuesField)43 NRTCachingDirectory (org.apache.lucene.store.NRTCachingDirectory)43 SortedDocValuesField (org.apache.lucene.document.SortedDocValuesField)40