Use of org.apache.lucene.document.StoredField in the OpenGrok project (by OpenGrok): the analyze method of the PlainAnalyzer class.
// Analyzes one source file: indexes its full text, runs ctags to index symbol
// definitions/references plus serialized tag data, and optionally writes a
// cross-reference (xref) together with scope information.
// NOTE(review): getReader(src.getStream()) is invoked up to three times —
// assumes StreamSource re-opens the underlying stream each call; confirm
// against the StreamSource contract.
@Override
public void analyze(Document doc, StreamSource src, Writer xrefOut) throws IOException {
// Full-text index of the raw content.
doc.add(new TextField(QueryBuilder.FULL, getReader(src.getStream())));
String fullpath = doc.get(QueryBuilder.FULLPATH);
// Symbol extraction only runs when the document carries a path and a ctags
// instance is configured.
if (fullpath != null && ctags != null) {
defs = ctags.doCtags(fullpath + "\n");
if (defs != null && defs.numberOfSymbols() > 0) {
doc.add(new TextField(QueryBuilder.DEFS, new IteratorReader(defs.getSymbols())));
// Explicitly use this analyzer's own symbol token stream to work around
// issue #1376 ("symbols search works like full text search").
TextField ref = new TextField(QueryBuilder.REFS, this.SymbolTokenizer);
this.SymbolTokenizer.setReader(getReader(src.getStream()));
doc.add(ref);
// Store the serialized tag data so it can be deserialized at query time.
byte[] tags = defs.serialize();
doc.add(new StoredField(QueryBuilder.TAGS, tags));
}
}
// Optional xref generation; a null writer skips this phase entirely.
if (xrefOut != null) {
try (Reader in = getReader(src.getStream())) {
writeXref(in, xrefOut);
}
// Scopes are collected as a side effect of xref generation above.
Scopes scopes = xref.getScopes();
if (scopes.size() > 0) {
byte[] scopesSerialized = scopes.serialize();
doc.add(new StoredField(QueryBuilder.SCOPES, scopesSerialized));
}
}
}
Use of org.apache.lucene.document.StoredField in the neo4j project (by neo4j): the add method of the FullTxData class.
/**
 * Registers a key/value pair for the given entity in the transaction-local
 * Lucene index, creating and caching the entity's document on first use.
 * A null value marks the entity (or the given key) as an "orphan" so it is
 * always matched; the resulting document is added or updated in the writer
 * and the cached searcher is invalidated.
 *
 * @param holder   the transaction data holder (unused here, part of the contract)
 * @param entityId identity of the entity being indexed
 * @param key      property key, or null together with a null value for the
 *                 entity-wide orphan flag
 * @param value    property value, or null to record an orphan marker
 */
@Override
void add(TxDataHolder holder, EntityId entityId, String key, Object value) {
try {
ensureLuceneDataInstantiated();
long docId = entityId.id();
Document doc = findDocument(docId);
boolean isNewDocument = doc == null;
if (isNewDocument) {
// First write for this entity in the transaction: create the document,
// tag it as tx-state, and cache it for subsequent updates.
doc = IndexType.newDocument(entityId);
doc.add(new StoredField(TX_STATE_KEY, TX_STATE_VALUE));
cachedDocuments.put(docId, doc);
}
if (value == null) {
// Orphan marker: an "always hit" flag for the whole entity (key == null)
// or for a specific key. Not stored, only indexed.
String orphanValue = (key == null) ? ORPHANS_VALUE : key;
doc.add(new StringField(ORPHANS_KEY, orphanValue, Store.NO));
addOrphan(key);
} else {
index.type.addToDocument(doc, key, value);
}
// New documents are appended; existing ones are replaced by id term.
if (isNewDocument) {
writer.addDocument(doc);
} else {
writer.updateDocument(index.type.idTerm(docId), doc);
}
invalidateSearcher();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
Use of org.apache.lucene.document.StoredField in the neo4j project (by neo4j): the createDocument method of the LuceneAllDocumentsReaderTest class.
/**
 * Builds a document holding a single stored-only field named "value".
 *
 * @param value the string to store
 * @return a fresh document containing the stored field
 */
private static Document createDocument(String value) {
Document result = new Document();
StoredField storedValue = new StoredField("value", value);
result.add(storedValue);
return result;
}
Use of org.apache.lucene.document.StoredField in the Gerrit project (by GerritCodeReview): the add method of the AbstractLuceneIndex class.
/**
 * Adds all values of a schema field to the Lucene document, choosing the
 * Lucene field class that matches the schema field's type.
 *
 * @param doc    target Lucene document
 * @param values the schema field together with its values for this document
 * @throws IllegalArgumentException (via {@code FieldType.badFieldType})
 *         when the field type is not one of the supported kinds
 */
void add(Document doc, Values<V> values) {
// Resolve the field's metadata once up front.
String fieldName = values.getField().getName();
FieldType<?> fieldType = values.getField().getType();
Store stored = store(values.getField());
if (fieldType == FieldType.INTEGER || fieldType == FieldType.INTEGER_RANGE) {
for (Object raw : values.getValues()) {
doc.add(new IntField(fieldName, (Integer) raw, stored));
}
return;
}
if (fieldType == FieldType.LONG) {
for (Object raw : values.getValues()) {
doc.add(new LongField(fieldName, (Long) raw, stored));
}
return;
}
if (fieldType == FieldType.TIMESTAMP) {
// Timestamps are indexed as their epoch-millisecond long value.
for (Object raw : values.getValues()) {
doc.add(new LongField(fieldName, ((Timestamp) raw).getTime(), stored));
}
return;
}
if (fieldType == FieldType.EXACT || fieldType == FieldType.PREFIX) {
for (Object raw : values.getValues()) {
doc.add(new StringField(fieldName, (String) raw, stored));
}
return;
}
if (fieldType == FieldType.FULL_TEXT) {
for (Object raw : values.getValues()) {
doc.add(new TextField(fieldName, (String) raw, stored));
}
return;
}
if (fieldType == FieldType.STORED_ONLY) {
// Stored-only fields carry raw bytes and are never indexed.
for (Object raw : values.getValues()) {
doc.add(new StoredField(fieldName, (byte[]) raw));
}
return;
}
// No branch matched: reject the unknown field type, even for empty values.
throw FieldType.badFieldType(fieldType);
}
Use of org.apache.lucene.document.StoredField in the lucene-solr project (by apache): the testNonIndexedFields method of the TestFieldCache class.
/**
 * Verifies that stored-only (non-indexed) fields yield empty doc values and
 * that the FieldCache caches nothing for them.
 */
public void testNonIndexedFields() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
// One document with stored-only fields of every "bogus" flavor; none of
// these are indexed, so every cache lookup below must come back empty.
Document doc = new Document();
String[] storedOnlyFields = {
"bogusbytes", "bogusshorts", "bogusints", "boguslongs", "bogusfloats",
"bogusdoubles", "bogusterms", "bogustermsindex", "bogusmultivalued", "bogusbits"
};
for (String fieldName : storedOnlyFields) {
doc.add(new StoredField(fieldName, "bogus"));
}
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
writer.close();
LeafReader leaf = getOnlyLeafReader(reader);
final FieldCache cache = FieldCache.DEFAULT;
cache.purgeAllCaches();
assertEquals(0, cache.getCacheEntries().length);
// Numeric lookups over non-indexed fields iterate no documents.
NumericDocValues intValues = cache.getNumerics(leaf, "bogusints", FieldCache.INT_POINT_PARSER);
assertEquals(NO_MORE_DOCS, intValues.nextDoc());
NumericDocValues longValues = cache.getNumerics(leaf, "boguslongs", FieldCache.LONG_POINT_PARSER);
assertEquals(NO_MORE_DOCS, longValues.nextDoc());
NumericDocValues floatValues = cache.getNumerics(leaf, "bogusfloats", FieldCache.FLOAT_POINT_PARSER);
assertEquals(NO_MORE_DOCS, floatValues.nextDoc());
NumericDocValues doubleValues = cache.getNumerics(leaf, "bogusdoubles", FieldCache.DOUBLE_POINT_PARSER);
assertEquals(NO_MORE_DOCS, doubleValues.nextDoc());
// Terms-based lookups behave the same way.
BinaryDocValues termValues = cache.getTerms(leaf, "bogusterms");
assertEquals(NO_MORE_DOCS, termValues.nextDoc());
SortedDocValues sortedValues = cache.getTermsIndex(leaf, "bogustermsindex");
assertEquals(NO_MORE_DOCS, sortedValues.nextDoc());
SortedSetDocValues sortedSetValues = cache.getDocTermOrds(leaf, "bogusmultivalued", null);
assertEquals(NO_MORE_DOCS, sortedSetValues.nextDoc());
Bits docsWithField = cache.getDocsWithField(leaf, "bogusbits", null);
assertFalse(docsWithField.get(0));
// check that we cached nothing
assertEquals(0, cache.getCacheEntries().length);
reader.close();
dir.close();
}
Aggregations