Example 11 with Fieldable

Use of org.apache.lucene.document.Fieldable in project neo4j-mobile-android by neo4j-contrib.

Class IndexType, method instantiateField.

Fieldable instantiateField(String key, Object value, Index analyzed) {
    Fieldable field = null;
    if (value instanceof Number) {
        Number number = (Number) value;
        NumericField numberField = new NumericField(key, Store.YES, true);
        if (value instanceof Long) {
            numberField.setLongValue(number.longValue());
        } else if (value instanceof Float) {
            numberField.setFloatValue(number.floatValue());
        } else if (value instanceof Double) {
            numberField.setDoubleValue(number.doubleValue());
        } else {
            numberField.setIntValue(number.intValue());
        }
        field = numberField;
    } else {
        field = new Field(key, value.toString(), Store.YES, analyzed);
    }
    return field;
}
Also used : Field(org.apache.lucene.document.Field), NumericField(org.apache.lucene.document.NumericField), Fieldable(org.apache.lucene.document.Fieldable)
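
A hedged sketch of how a caller might feed the returned Fieldable into a document (the keys, values, and Index choices are illustrative assumptions; Document.add(Fieldable) and IndexWriter.addDocument are standard Lucene 3.x API):

Document doc = new Document();
// a Long value goes through the NumericField branch above
doc.add(instantiateField("age", 42L, Field.Index.NOT_ANALYZED));
// any non-number falls back to a stored, analyzed text Field
doc.add(instantiateField("name", "Neo", Field.Index.ANALYZED));
// writer is some previously opened IndexWriter (assumed)
writer.addDocument(doc);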

Example 12 with Fieldable

Use of org.apache.lucene.document.Fieldable in project neo4j-mobile-android by neo4j-contrib.

Class LuceneBatchInserterIndex, method removeFromCache.

private void removeFromCache(long entityId) throws IOException, CorruptIndexException {
    IndexSearcher searcher = searcher();
    Query query = type.idTermQuery(entityId);
    TopDocs docs = searcher.search(query, 1);
    if (docs.totalHits > 0) {
        Document document = searcher.doc(docs.scoreDocs[0].doc);
        for (Fieldable field : document.getFields()) {
            String key = field.name();
            Object value = field.stringValue();
            removeFromCache(entityId, key, value);
        }
    }
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher), TopDocs(org.apache.lucene.search.TopDocs), Query(org.apache.lucene.search.Query), Fieldable(org.apache.lucene.document.Fieldable), Document(org.apache.lucene.document.Document)
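
Here type.idTermQuery(entityId) is Neo4j-internal; a hedged sketch of the plain Lucene 3.x lookup it presumably wraps (the "_id_" field name is an assumption for illustration; TermQuery and Term are org.apache.lucene.search.TermQuery and org.apache.lucene.index.Term):

// Illustrative only: a TermQuery on the stored id field finds the single document for the entity.
Query query = new TermQuery(new Term("_id_", String.valueOf(entityId)));
TopDocs docs = searcher.search(query, 1);
if (docs.totalHits > 0) {
    // the stored fields of that document are what removeFromCache(entityId, key, value) is fed with
    Document document = searcher.doc(docs.scoreDocs[0].doc);
}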

Example 13 with Fieldable

Use of org.apache.lucene.document.Fieldable in project jackrabbit by apache.

Class AbstractExcerpt, method getExcerpt.

/**
 * {@inheritDoc}
 */
public String getExcerpt(NodeId id, int maxFragments, int maxFragmentSize) throws IOException {
    IndexReader reader = index.getIndexReader();
    try {
        checkRewritten(reader);
        Term idTerm = TermFactory.createUUIDTerm(id.toString());
        TermDocs tDocs = reader.termDocs(idTerm);
        int docNumber;
        Document doc;
        try {
            if (tDocs.next()) {
                docNumber = tDocs.doc();
                doc = reader.document(docNumber);
            } else {
                // node not found in index
                return null;
            }
        } finally {
            tDocs.close();
        }
        Fieldable[] fields = doc.getFieldables(FieldNames.FULLTEXT);
        if (fields.length == 0) {
            log.debug("Fulltext field not stored, using {}", SimpleExcerptProvider.class.getName());
            SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
            exProvider.init(query, index);
            return exProvider.getExcerpt(id, maxFragments, maxFragmentSize);
        }
        StringBuffer text = new StringBuffer();
        String separator = "";
        for (int i = 0; i < fields.length; i++) {
            if (fields[i].stringValue().length() == 0) {
                continue;
            }
            text.append(separator);
            text.append(fields[i].stringValue());
            separator = " ";
        }
        TermFreqVector tfv = reader.getTermFreqVector(docNumber, FieldNames.FULLTEXT);
        if (tfv instanceof TermPositionVector) {
            return createExcerpt((TermPositionVector) tfv, text.toString(), maxFragments, maxFragmentSize);
        } else {
            log.debug("No TermPositionVector on Fulltext field.");
            return null;
        }
    } finally {
        Util.closeOrRelease(reader);
    }
}
Also used : TermFreqVector(org.apache.lucene.index.TermFreqVector), Fieldable(org.apache.lucene.document.Fieldable), TermDocs(org.apache.lucene.index.TermDocs), IndexReader(org.apache.lucene.index.IndexReader), Term(org.apache.lucene.index.Term), Document(org.apache.lucene.document.Document), TermPositionVector(org.apache.lucene.index.TermPositionVector)
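
createExcerpt is left to subclasses; a minimal sketch of what a highlighter can read from the TermPositionVector (standard Lucene 3.x API; the fragment-cutting logic itself is omitted and the loop body is illustrative only):

// Walk the terms of the fulltext vector and collect their character offsets.
TermPositionVector tpv = (TermPositionVector) tfv;
String[] terms = tpv.getTerms();
for (int i = 0; i < terms.length; i++) {
    // getOffsets returns null if offsets were not stored for this field
    TermVectorOffsetInfo[] offsets = tpv.getOffsets(i);
    if (offsets != null) {
        for (TermVectorOffsetInfo info : offsets) {
            int start = info.getStartOffset();
            int end = info.getEndOffset();
            // an excerpt provider cuts fragments of the concatenated text around [start, end)
        }
    }
}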

Example 14 with Fieldable

Use of org.apache.lucene.document.Fieldable in project jackrabbit by apache.

Class AbstractIndex, method getFinishedDocument.

/**
 * Returns a document that is finished with text extraction and is ready to
 * be added to the index.
 *
 * @param doc the document to check.
 * @return <code>doc</code> if it is finished already or a stripped down
 *         copy of <code>doc</code> without text extractors.
 * @throws IOException if the document cannot be added to the indexing
 *                     queue.
 */
private Document getFinishedDocument(Document doc) throws IOException {
    if (!Util.isDocumentReady(doc)) {
        Document copy = new Document();
        // mark the document that reindexing is required
        copy.add(new Field(FieldNames.REINDEXING_REQUIRED, false, "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
        for (Fieldable f : doc.getFields()) {
            Fieldable field = null;
            Field.TermVector tv = getTermVectorParameter(f);
            Field.Store stored = f.isStored() ? Field.Store.YES : Field.Store.NO;
            Field.Index indexed = getIndexParameter(f);
            if (f instanceof LazyTextExtractorField || f.readerValue() != null) {
                // replace all readers with empty string reader
                field = new Field(f.name(), new StringReader(""), tv);
            } else if (f.stringValue() != null) {
                field = new Field(f.name(), false, f.stringValue(), stored, indexed, tv);
            } else if (f.isBinary()) {
                field = new Field(f.name(), f.getBinaryValue(), stored);
            } else if (f.tokenStreamValue() != null && f.tokenStreamValue() instanceof SingletonTokenStream) {
                TokenStream tokenStream = f.tokenStreamValue();
                TermAttribute termAttribute = tokenStream.addAttribute(TermAttribute.class);
                PayloadAttribute payloadAttribute = tokenStream.addAttribute(PayloadAttribute.class);
                tokenStream.incrementToken();
                String value = new String(termAttribute.termBuffer(), 0, termAttribute.termLength());
                tokenStream.reset();
                field = new Field(f.name(), new SingletonTokenStream(value, (Payload) payloadAttribute.getPayload().clone()));
            }
            if (field != null) {
                field.setOmitNorms(f.getOmitNorms());
                copy.add(field);
            }
        }
        // schedule the original document for later indexing
        Document existing = indexingQueue.addDocument(doc);
        if (existing != null) {
            // the queue already contained a pending document for this
            // node. -> dispose the document
            Util.disposeDocument(existing);
        }
        // use the stripped down copy for now
        doc = copy;
    }
    return doc;
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream), PayloadAttribute(org.apache.lucene.analysis.tokenattributes.PayloadAttribute), Document(org.apache.lucene.document.Document), Field(org.apache.lucene.document.Field), Fieldable(org.apache.lucene.document.Fieldable), StringReader(java.io.StringReader), TermAttribute(org.apache.lucene.analysis.tokenattributes.TermAttribute), Payload(org.apache.lucene.index.Payload)
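
getIndexParameter and getTermVectorParameter are not shown in this excerpt; a plausible sketch, assuming they simply map a Fieldable's flags back to Field.Index and Field.TermVector constants (not the verbatim Jackrabbit code):

// Hypothetical helper: derive the Field.Index constant from an existing Fieldable.
private Field.Index getIndexParameter(Fieldable f) {
    if (!f.isIndexed()) {
        return Field.Index.NO;
    } else if (f.isTokenized()) {
        return Field.Index.ANALYZED;
    } else {
        return Field.Index.NOT_ANALYZED;
    }
}

// Hypothetical helper: derive the Field.TermVector constant from an existing Fieldable.
private Field.TermVector getTermVectorParameter(Fieldable f) {
    if (f.isStorePositionWithTermVector() && f.isStoreOffsetWithTermVector()) {
        return Field.TermVector.WITH_POSITIONS_OFFSETS;
    } else if (f.isStorePositionWithTermVector()) {
        return Field.TermVector.WITH_POSITIONS;
    } else if (f.isStoreOffsetWithTermVector()) {
        return Field.TermVector.WITH_OFFSETS;
    } else if (f.isTermVectorStored()) {
        return Field.TermVector.YES;
    } else {
        return Field.TermVector.NO;
    }
}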

Example 15 with Fieldable

Use of org.apache.lucene.document.Fieldable in project jackrabbit by apache.

Class SearchIndex, method mergeAggregatedNodeIndexes.

/**
 * Merges the fulltext indexed fields of the aggregated node states into
 * <code>doc</code>.
 *
 * @param state the node state on which <code>doc</code> was created.
 * @param doc the lucene document with index fields from <code>state</code>.
 * @param ifv the current index format version.
 */
protected void mergeAggregatedNodeIndexes(NodeState state, Document doc, IndexFormatVersion ifv) {
    if (indexingConfig != null) {
        AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
        if (aggregateRules == null) {
            return;
        }
        try {
            ItemStateManager ism = getContext().getItemStateManager();
            for (AggregateRule aggregateRule : aggregateRules) {
                boolean ruleMatched = false;
                // node includes
                NodeState[] aggregates = aggregateRule.getAggregatedNodeStates(state);
                if (aggregates != null) {
                    ruleMatched = true;
                    for (NodeState aggregate : aggregates) {
                        Document aDoc = createDocument(aggregate, getNamespaceMappings(), ifv);
                        // transfer fields to doc if there are any
                        Fieldable[] fulltextFields = aDoc.getFieldables(FieldNames.FULLTEXT);
                        if (fulltextFields != null) {
                            for (Fieldable fulltextField : fulltextFields) {
                                doc.add(fulltextField);
                            }
                            doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, false, aggregate.getNodeId().toString(), Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
                        }
                    }
                    // make sure that fulltext fields are aligned properly
                    // first all stored fields, then remaining
                    Fieldable[] fulltextFields = doc.getFieldables(FieldNames.FULLTEXT);
                    doc.removeFields(FieldNames.FULLTEXT);
                    Arrays.sort(fulltextFields, FIELDS_COMPARATOR_STORED);
                    for (Fieldable f : fulltextFields) {
                        doc.add(f);
                    }
                }
                // property includes
                PropertyState[] propStates = aggregateRule.getAggregatedPropertyStates(state);
                if (propStates != null) {
                    ruleMatched = true;
                    for (PropertyState propState : propStates) {
                        String namePrefix = FieldNames.createNamedValue(getNamespaceMappings().translateName(propState.getName()), "");
                        NodeState parent = (NodeState) ism.getItemState(propState.getParentId());
                        Document aDoc = createDocument(parent, getNamespaceMappings(), ifv);
                        try {
                            // find the right fields to transfer
                            Fieldable[] fields = aDoc.getFieldables(FieldNames.PROPERTIES);
                            for (Fieldable field : fields) {
                                // assume properties fields use SingleTokenStream
                                TokenStream tokenStream = field.tokenStreamValue();
                                TermAttribute termAttribute = tokenStream.addAttribute(TermAttribute.class);
                                PayloadAttribute payloadAttribute = tokenStream.addAttribute(PayloadAttribute.class);
                                tokenStream.incrementToken();
                                tokenStream.end();
                                tokenStream.close();
                                String value = new String(termAttribute.termBuffer(), 0, termAttribute.termLength());
                                if (value.startsWith(namePrefix)) {
                                    // extract value
                                    String rawValue = value.substring(namePrefix.length());
                                    // create new named value
                                    Path p = getRelativePath(state, propState);
                                    String path = getNamespaceMappings().translatePath(p);
                                    value = FieldNames.createNamedValue(path, rawValue);
                                    termAttribute.setTermBuffer(value);
                                    PropertyMetaData pdm = PropertyMetaData.fromByteArray(payloadAttribute.getPayload().getData());
                                    doc.add(new Field(field.name(), new SingletonTokenStream(value, pdm.getPropertyType())));
                                    doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, false, parent.getNodeId().toString(), Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
                                    if (pdm.getPropertyType() == PropertyType.STRING) {
                                        // add to fulltext index
                                        Field ft = new Field(FieldNames.FULLTEXT, false, rawValue, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS, Field.TermVector.NO);
                                        doc.add(ft);
                                    }
                                }
                            }
                        } finally {
                            Util.disposeDocument(aDoc);
                        }
                    }
                }
                // only use first aggregate definition that matches
                if (ruleMatched) {
                    break;
                }
            }
        } catch (NoSuchItemStateException e) {
            // do not fail if aggregate cannot be created
            log.info("Exception while building indexing aggregate for {}. Node is not available {}.", state.getNodeId(), e.getMessage());
        } catch (Exception e) {
            // do not fail if aggregate cannot be created
            log.warn("Exception while building indexing aggregate for " + state.getNodeId(), e);
        }
    }
}
Also used : Path(org.apache.jackrabbit.spi.Path), TokenStream(org.apache.lucene.analysis.TokenStream), PayloadAttribute(org.apache.lucene.analysis.tokenattributes.PayloadAttribute), NodeState(org.apache.jackrabbit.core.state.NodeState), Document(org.apache.lucene.document.Document), FileSystemException(org.apache.jackrabbit.core.fs.FileSystemException), SAXException(org.xml.sax.SAXException), JournalException(org.apache.jackrabbit.core.journal.JournalException), NoSuchItemStateException(org.apache.jackrabbit.core.state.NoSuchItemStateException), RepositoryException(javax.jcr.RepositoryException), MalformedURLException(java.net.MalformedURLException), IOException(java.io.IOException), ItemStateException(org.apache.jackrabbit.core.state.ItemStateException), ParserConfigurationException(javax.xml.parsers.ParserConfigurationException), InvalidQueryException(javax.jcr.query.InvalidQueryException), PropertyState(org.apache.jackrabbit.core.state.PropertyState), SortField(org.apache.lucene.search.SortField), Field(org.apache.lucene.document.Field), Fieldable(org.apache.lucene.document.Fieldable), ItemStateManager(org.apache.jackrabbit.core.state.ItemStateManager), TermAttribute(org.apache.lucene.analysis.tokenattributes.TermAttribute)
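
FIELDS_COMPARATOR_STORED is defined elsewhere in SearchIndex; a plausible sketch, assuming it only orders stored fulltext fields before unstored ones, as the "first all stored fields, then remaining" comment suggests (uses java.util.Comparator; not the verbatim Jackrabbit code):

// Hypothetical comparator: stored fields sort before unstored fields.
private static final Comparator<Fieldable> FIELDS_COMPARATOR_STORED = new Comparator<Fieldable>() {
    public int compare(Fieldable f1, Fieldable f2) {
        return Boolean.valueOf(f2.isStored()).compareTo(Boolean.valueOf(f1.isStored()));
    }
};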

Aggregations

Fieldable (org.apache.lucene.document.Fieldable): 18
Document (org.apache.lucene.document.Document): 10
Field (org.apache.lucene.document.Field): 6
Term (org.apache.lucene.index.Term): 4
TokenStream (org.apache.lucene.analysis.TokenStream): 3
TermAttribute (org.apache.lucene.analysis.tokenattributes.TermAttribute): 3
NumericField (org.apache.lucene.document.NumericField): 3
TopDocs (org.apache.lucene.search.TopDocs): 3
IOException (java.io.IOException): 2
StringReader (java.io.StringReader): 2
ArrayList (java.util.ArrayList): 2
Date (java.util.Date): 2
HashSet (java.util.HashSet): 2
ItemStateException (org.apache.jackrabbit.core.state.ItemStateException): 2
NoSuchItemStateException (org.apache.jackrabbit.core.state.NoSuchItemStateException): 2
PropertyState (org.apache.jackrabbit.core.state.PropertyState): 2
PayloadAttribute (org.apache.lucene.analysis.tokenattributes.PayloadAttribute): 2
EmbeddedSortField (org.apache.lucene.document.EmbeddedSortField): 2
IndexSearcher (org.apache.lucene.search.IndexSearcher): 2
Query (org.apache.lucene.search.Query): 2