Example use of org.apache.lucene.document.Fieldable in the project neo4j-mobile-android (by neo4j-contrib):
the method instantiateField of the class IndexType.
/**
 * Creates a Lucene field for the given key/value pair. Numeric values are
 * stored in a trie-encoded {@code NumericField}; all other values are stored
 * as their string representation with the supplied analysis mode.
 *
 * @param key      the field name.
 * @param value    the value to index (a {@code Number} or any object whose
 *                 {@code toString()} is indexed).
 * @param analyzed how non-numeric values are analyzed.
 * @return the instantiated field, always stored.
 */
Fieldable instantiateField(String key, Object value, Index analyzed) {
    // Non-numbers: plain stored field with the requested analysis.
    if (!(value instanceof Number)) {
        return new Field(key, value.toString(), Store.YES, analyzed);
    }
    NumericField result = new NumericField(key, Store.YES, true);
    Number number = (Number) value;
    if (value instanceof Long) {
        result.setLongValue(number.longValue());
    } else if (value instanceof Float) {
        result.setFloatValue(number.floatValue());
    } else if (value instanceof Double) {
        result.setDoubleValue(number.doubleValue());
    } else {
        // Integer, Short, Byte and other Number types fall back to int.
        result.setIntValue(number.intValue());
    }
    return result;
}
Example use of org.apache.lucene.document.Fieldable in the project neo4j-mobile-android (by neo4j-contrib):
the method removeFromCache of the class LuceneBatchInserterIndex.
/**
 * Evicts every cached key/value entry for the given entity by looking up its
 * single indexed document and removing each of the document's fields from
 * the cache.
 *
 * @param entityId the id of the entity whose cached entries are removed.
 * @throws IOException           on index access failure.
 * @throws CorruptIndexException if the index is corrupt.
 */
private void removeFromCache(long entityId) throws IOException, CorruptIndexException {
    IndexSearcher searcher = searcher();
    TopDocs hits = searcher.search(type.idTermQuery(entityId), 1);
    if (hits.totalHits <= 0) {
        // entity not present in the index -> nothing cached to remove
        return;
    }
    Document document = searcher.doc(hits.scoreDocs[0].doc);
    for (Fieldable field : document.getFields()) {
        removeFromCache(entityId, field.name(), field.stringValue());
    }
}
Example use of org.apache.lucene.document.Fieldable in the project jackrabbit (by apache):
the method getExcerpt of the class AbstractExcerpt.
/**
 * {@inheritDoc}
 * <p>
 * Looks up the node's document by its UUID term, concatenates the stored
 * fulltext field values and highlights them using the document's term
 * position vector. Falls back to {@link SimpleExcerptProvider} when the
 * fulltext field is not stored.
 */
public String getExcerpt(NodeId id, int maxFragments, int maxFragmentSize) throws IOException {
    IndexReader reader = index.getIndexReader();
    try {
        checkRewritten(reader);
        Term idTerm = TermFactory.createUUIDTerm(id.toString());
        TermDocs tDocs = reader.termDocs(idTerm);
        int docNumber;
        Document doc;
        try {
            if (tDocs.next()) {
                docNumber = tDocs.doc();
                doc = reader.document(docNumber);
            } else {
                // node not found in index
                return null;
            }
        } finally {
            tDocs.close();
        }
        Fieldable[] fields = doc.getFieldables(FieldNames.FULLTEXT);
        if (fields.length == 0) {
            // fulltext not stored -> cannot highlight from the index itself
            log.debug("Fulltext field not stored, using {}", SimpleExcerptProvider.class.getName());
            SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
            exProvider.init(query, index);
            return exProvider.getExcerpt(id, maxFragments, maxFragmentSize);
        }
        // Concatenate all non-empty fulltext values, space-separated.
        // StringBuilder replaces the original StringBuffer: the buffer is
        // method-local, so the implicit synchronization bought nothing.
        StringBuilder text = new StringBuilder();
        String separator = "";
        for (Fieldable field : fields) {
            String value = field.stringValue();
            if (value.length() == 0) {
                continue;
            }
            text.append(separator);
            text.append(value);
            separator = " ";
        }
        TermFreqVector tfv = reader.getTermFreqVector(docNumber, FieldNames.FULLTEXT);
        if (tfv instanceof TermPositionVector) {
            return createExcerpt((TermPositionVector) tfv, text.toString(), maxFragments, maxFragmentSize);
        } else {
            // without term positions the highlighter cannot locate matches
            log.debug("No TermPositionVector on Fulltext field.");
            return null;
        }
    } finally {
        Util.closeOrRelease(reader);
    }
}
Example use of org.apache.lucene.document.Fieldable in the project jackrabbit (by apache):
the method getFinishedDocument of the class AbstractIndex.
/**
 * Returns a document that is finished with text extraction and is ready to
 * be added to the index.
 *
 * @param doc the document to check.
 * @return <code>doc</code> if it is finished already or a stripped down
 * copy of <code>doc</code> without text extractors.
 * @throws IOException if the document cannot be added to the indexing
 * queue.
 */
private Document getFinishedDocument(Document doc) throws IOException {
if (!Util.isDocumentReady(doc)) {
Document copy = new Document();
// mark the document that reindexing is required
copy.add(new Field(FieldNames.REINDEXING_REQUIRED, false, "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
// Re-create every field of the original document on the copy, replacing
// any field whose value is still being extracted (a reader-backed or
// lazy-extractor field) with an empty placeholder so the copy can be
// indexed immediately.
for (Fieldable f : doc.getFields()) {
Fieldable field = null;
Field.TermVector tv = getTermVectorParameter(f);
Field.Store stored = f.isStored() ? Field.Store.YES : Field.Store.NO;
Field.Index indexed = getIndexParameter(f);
if (f instanceof LazyTextExtractorField || f.readerValue() != null) {
// replace all readers with empty string reader
field = new Field(f.name(), new StringReader(""), tv);
} else if (f.stringValue() != null) {
field = new Field(f.name(), false, f.stringValue(), stored, indexed, tv);
} else if (f.isBinary()) {
field = new Field(f.name(), f.getBinaryValue(), stored);
} else if (f.tokenStreamValue() != null && f.tokenStreamValue() instanceof SingletonTokenStream) {
// Duplicate a single-token stream: consume its one token and payload,
// then build a fresh stream so the copy shares no mutable state.
TokenStream tokenStream = f.tokenStreamValue();
TermAttribute termAttribute = tokenStream.addAttribute(TermAttribute.class);
PayloadAttribute payloadAttribute = tokenStream.addAttribute(PayloadAttribute.class);
tokenStream.incrementToken();
String value = new String(termAttribute.termBuffer(), 0, termAttribute.termLength());
// NOTE(review): reset() is called after the token was consumed,
// presumably so the original document can still be indexed from the
// queue later — confirm SingletonTokenStream supports reset().
tokenStream.reset();
// NOTE(review): getPayload() is cloned without a null check; this
// assumes SingletonTokenStream always carries a payload — confirm.
field = new Field(f.name(), new SingletonTokenStream(value, (Payload) payloadAttribute.getPayload().clone()));
}
// fields with an unrecognized value type are silently dropped from
// the stripped-down copy
if (field != null) {
field.setOmitNorms(f.getOmitNorms());
copy.add(field);
}
}
// schedule the original document for later indexing
Document existing = indexingQueue.addDocument(doc);
if (existing != null) {
// the queue already contained a pending document for this
// node. -> dispose the document
Util.disposeDocument(existing);
}
// use the stripped down copy for now
doc = copy;
}
return doc;
}
Example use of org.apache.lucene.document.Fieldable in the project jackrabbit (by apache):
the method mergeAggregatedNodeIndexes of the class SearchIndex.
/**
 * Merges the fulltext indexed fields of the aggregated node states into
 * <code>doc</code>.
 *
 * @param state the node state on which <code>doc</code> was created.
 * @param doc the lucene document with index fields from <code>state</code>.
 * @param ifv the current index format version.
 */
protected void mergeAggregatedNodeIndexes(NodeState state, Document doc, IndexFormatVersion ifv) {
if (indexingConfig != null) {
AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
if (aggregateRules == null) {
// no aggregate rules configured -> nothing to merge
return;
}
try {
ItemStateManager ism = getContext().getItemStateManager();
for (AggregateRule aggregateRule : aggregateRules) {
boolean ruleMatched = false;
// node includes
NodeState[] aggregates = aggregateRule.getAggregatedNodeStates(state);
if (aggregates != null) {
ruleMatched = true;
for (NodeState aggregate : aggregates) {
// index the aggregated node and copy its fulltext fields over
Document aDoc = createDocument(aggregate, getNamespaceMappings(), ifv);
// transfer fields to doc if there are any
Fieldable[] fulltextFields = aDoc.getFieldables(FieldNames.FULLTEXT);
if (fulltextFields != null) {
for (Fieldable fulltextField : fulltextFields) {
doc.add(fulltextField);
}
// record which node contributed, so the aggregate can be
// re-indexed when that node changes
doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, false, aggregate.getNodeId().toString(), Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
}
}
// make sure that fulltext fields are aligned properly
// first all stored fields, then remaining
Fieldable[] fulltextFields = doc.getFieldables(FieldNames.FULLTEXT);
doc.removeFields(FieldNames.FULLTEXT);
Arrays.sort(fulltextFields, FIELDS_COMPARATOR_STORED);
for (Fieldable f : fulltextFields) {
doc.add(f);
}
}
// property includes
PropertyState[] propStates = aggregateRule.getAggregatedPropertyStates(state);
if (propStates != null) {
ruleMatched = true;
for (PropertyState propState : propStates) {
// prefix identifying values of this property within the
// parent's PROPERTIES fields
String namePrefix = FieldNames.createNamedValue(getNamespaceMappings().translateName(propState.getName()), "");
NodeState parent = (NodeState) ism.getItemState(propState.getParentId());
Document aDoc = createDocument(parent, getNamespaceMappings(), ifv);
try {
// find the right fields to transfer
Fieldable[] fields = aDoc.getFieldables(FieldNames.PROPERTIES);
for (Fieldable field : fields) {
// assume properties fields use SingleTokenStream
TokenStream tokenStream = field.tokenStreamValue();
TermAttribute termAttribute = tokenStream.addAttribute(TermAttribute.class);
PayloadAttribute payloadAttribute = tokenStream.addAttribute(PayloadAttribute.class);
tokenStream.incrementToken();
tokenStream.end();
tokenStream.close();
String value = new String(termAttribute.termBuffer(), 0, termAttribute.termLength());
if (value.startsWith(namePrefix)) {
// extract value
String rawValue = value.substring(namePrefix.length());
// create new named value
Path p = getRelativePath(state, propState);
String path = getNamespaceMappings().translatePath(p);
value = FieldNames.createNamedValue(path, rawValue);
termAttribute.setTermBuffer(value);
// the token payload carries the property's metadata,
// including its JCR property type
PropertyMetaData pdm = PropertyMetaData.fromByteArray(payloadAttribute.getPayload().getData());
doc.add(new Field(field.name(), new SingletonTokenStream(value, pdm.getPropertyType())));
doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, false, parent.getNodeId().toString(), Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
if (pdm.getPropertyType() == PropertyType.STRING) {
// add to fulltext index
Field ft = new Field(FieldNames.FULLTEXT, false, rawValue, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS, Field.TermVector.NO);
doc.add(ft);
}
}
}
} finally {
Util.disposeDocument(aDoc);
}
}
}
// only use first aggregate definition that matches
if (ruleMatched) {
break;
}
}
} catch (NoSuchItemStateException e) {
// do not fail if aggregate cannot be created
log.info("Exception while building indexing aggregate for {}. Node is not available {}.", state.getNodeId(), e.getMessage());
} catch (Exception e) {
// do not fail if aggregate cannot be created
log.warn("Exception while building indexing aggregate for " + state.getNodeId(), e);
}
}
}
Aggregations