Use of org.apache.lucene.document.SortedDocValuesField in project lucene-solr by apache.
The class TestFieldCacheWithThreads, method test2.
public void test2() throws Exception {
  Random random = random();
  final int NUM_DOCS = atLeast(100);
  final Directory dir = newDirectory();
  final RandomIndexWriter writer = new RandomIndexWriter(random, dir);
  final boolean allowDups = random.nextBoolean();
  final Set<String> seen = new HashSet<>();
  if (VERBOSE) {
    System.out.println("TEST: NUM_DOCS=" + NUM_DOCS + " allowDups=" + allowDups);
  }
  int numDocs = 0;
  final List<BytesRef> docValues = new ArrayList<>();
  // TODO: deletions
  while (numDocs < NUM_DOCS) {
    final String s;
    if (random.nextBoolean()) {
      s = TestUtil.randomSimpleString(random);
    } else {
      s = TestUtil.randomUnicodeString(random);
    }
    final BytesRef br = new BytesRef(s);
    if (!allowDups) {
      if (seen.contains(s)) {
        continue;
      }
      seen.add(s);
    }
    if (VERBOSE) {
      System.out.println(" " + numDocs + ": s=" + s);
    }
    final Document doc = new Document();
    doc.add(new SortedDocValuesField("stringdv", br));
    doc.add(new NumericDocValuesField("id", numDocs));
    docValues.add(br);
    writer.addDocument(doc);
    numDocs++;
    if (random.nextInt(40) == 17) {
      // force flush
      writer.getReader().close();
    }
  }
  writer.forceMerge(1);
  final DirectoryReader r = writer.getReader();
  writer.close();
  final LeafReader sr = getOnlyLeafReader(r);
  final long END_TIME = System.nanoTime() + TimeUnit.NANOSECONDS.convert((TEST_NIGHTLY ? 30 : 1), TimeUnit.SECONDS);
  final int NUM_THREADS = TestUtil.nextInt(random(), 1, 10);
  Thread[] threads = new Thread[NUM_THREADS];
  for (int thread = 0; thread < NUM_THREADS; thread++) {
    threads[thread] = new Thread() {
      @Override
      public void run() {
        Random random = random();
        final SortedDocValues stringDVDirect;
        final NumericDocValues docIDToID;
        try {
          stringDVDirect = sr.getSortedDocValues("stringdv");
          docIDToID = sr.getNumericDocValues("id");
          assertNotNull(stringDVDirect);
        } catch (IOException ioe) {
          throw new RuntimeException(ioe);
        }
        int[] docIDToIDArray = new int[sr.maxDoc()];
        for (int i = 0; i < sr.maxDoc(); i++) {
          try {
            assertEquals(i, docIDToID.nextDoc());
            docIDToIDArray[i] = (int) docIDToID.longValue();
          } catch (IOException ioe) {
            throw new RuntimeException(ioe);
          }
        }
        while (System.nanoTime() < END_TIME) {
          for (int iter = 0; iter < 100; iter++) {
            final int docID = random.nextInt(sr.maxDoc());
            try {
              SortedDocValues dvs = sr.getSortedDocValues("stringdv");
              assertEquals(docID, dvs.advance(docID));
              assertEquals(docValues.get(docIDToIDArray[docID]), dvs.binaryValue());
            } catch (IOException ioe) {
              throw new RuntimeException(ioe);
            }
          }
        }
      }
    };
    threads[thread].start();
  }
  for (Thread thread : threads) {
    thread.join();
  }
  r.close();
  dir.close();
}
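For readers new to this API, here is a minimal, single-threaded sketch of the round-trip the threads above hammer concurrently: index one document with a SortedDocValuesField, then position the per-segment iterator with advance() and read the value back. This is a sketch, not part of the test; class and field names are illustrative, and it assumes the iterator-style doc values API of Lucene 7.x that this test uses.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;

public class SortedDocValuesRoundTrip {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory(); // in-memory directory, fine for a demo
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new SortedDocValuesField("stringdv", new BytesRef("hello")));
      writer.addDocument(doc);
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      LeafReader leaf = reader.leaves().get(0).reader(); // single segment here
      SortedDocValues dv = leaf.getSortedDocValues("stringdv");
      // Doc values are iterators in Lucene 7+: position on doc 0 before reading.
      if (dv.advance(0) == 0) {
        System.out.println(dv.binaryValue().utf8ToString()); // prints "hello"
      }
    }
    dir.close();
  }
}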
Use of org.apache.lucene.document.SortedDocValuesField in project lucene-solr by apache.
The class TestSlowCompositeReaderWrapper, method testOrdMapsAreCached.
public void testOrdMapsAreCached() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  doc.add(new SortedDocValuesField("sorted", new BytesRef("a")));
  doc.add(new SortedSetDocValuesField("sorted_set", new BytesRef("b")));
  doc.add(new SortedSetDocValuesField("sorted_set", new BytesRef("c")));
  w.addDocument(doc);
  w.getReader().close();
  doc = new Document();
  doc.add(new SortedDocValuesField("sorted", new BytesRef("b")));
  doc.add(new SortedSetDocValuesField("sorted_set", new BytesRef("c")));
  doc.add(new SortedSetDocValuesField("sorted_set", new BytesRef("d")));
  w.addDocument(doc);
  IndexReader reader = w.getReader();
  assertTrue(reader.leaves().size() > 1);
  SlowCompositeReaderWrapper slowWrapper = (SlowCompositeReaderWrapper) SlowCompositeReaderWrapper.wrap(reader);
  assertEquals(0, slowWrapper.cachedOrdMaps.size());
  assertEquals(MultiSortedDocValues.class, slowWrapper.getSortedDocValues("sorted").getClass());
  assertEquals(1, slowWrapper.cachedOrdMaps.size());
  assertEquals(MultiSortedSetDocValues.class, slowWrapper.getSortedSetDocValues("sorted_set").getClass());
  assertEquals(2, slowWrapper.cachedOrdMaps.size());
  reader.close();
  w.close();
  dir.close();
}
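SlowCompositeReaderWrapper is Solr's composite-to-leaf bridge; the cached ord maps it asserts on are the OrdinalMap instances that translate per-segment ordinals into global ones. In plain Lucene the same merged view is available through MultiDocValues, which builds that ordinal map on each call rather than caching it. A minimal sketch, assuming a small two-segment index like the one above (names are illustrative):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;

public class MergedOrdsSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriterConfig cfg = new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE); // keep segments separate
    try (IndexWriter w = new IndexWriter(dir, cfg)) {
      for (String v : new String[] { "a", "b" }) {
        Document doc = new Document();
        doc.add(new SortedDocValuesField("sorted", new BytesRef(v)));
        w.addDocument(doc);
        w.commit(); // one segment per document
      }
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      // Builds an OrdinalMap under the hood to merge per-segment ordinals:
      SortedDocValues merged = MultiDocValues.getSortedValues(reader, "sorted");
      for (int doc = merged.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = merged.nextDoc()) {
        System.out.println(doc + " -> " + merged.binaryValue().utf8ToString());
      }
    }
    dir.close();
  }
}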
Use of org.apache.lucene.document.SortedDocValuesField in project lucene-solr by apache.
The class TestIndexWriterOnVMError, method doTest.
// just one thread, serial merge policy, hopefully debuggable
private void doTest(MockDirectoryWrapper.Failure failOn) throws Exception {
  // log all exceptions we hit, in case we fail (for debugging)
  ByteArrayOutputStream exceptionLog = new ByteArrayOutputStream();
  PrintStream exceptionStream = new PrintStream(exceptionLog, true, "UTF-8");
  //PrintStream exceptionStream = System.out;
  final long analyzerSeed = random().nextLong();
  final Analyzer analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
      // we are gonna make it angry
      tokenizer.setEnableChecks(false);
      TokenStream stream = tokenizer;
      // emit some payloads
      if (fieldName.contains("payloads")) {
        stream = new MockVariableLengthPayloadFilter(new Random(analyzerSeed), stream);
      }
      return new TokenStreamComponents(tokenizer, stream);
    }
  };
  MockDirectoryWrapper dir = null;
  final int numIterations = TEST_NIGHTLY ? atLeast(100) : atLeast(5);
  STARTOVER:
  for (int iter = 0; iter < numIterations; iter++) {
    try {
      // close from last run
      if (dir != null) {
        dir.close();
      }
      // disable slow things: we don't rely upon sleeps here.
      dir = newMockDirectory();
      dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
      dir.setUseSlowOpenClosers(false);
      IndexWriterConfig conf = newIndexWriterConfig(analyzer);
      // just for now, try to keep this test reproducible
      conf.setMergeScheduler(new SerialMergeScheduler());
      // test never makes it this far...
      int numDocs = atLeast(2000);
      IndexWriter iw = new IndexWriter(dir, conf);
      // ensure there is always a commit
      iw.commit();
      dir.failOn(failOn);
      for (int i = 0; i < numDocs; i++) {
        Document doc = new Document();
        doc.add(newStringField("id", Integer.toString(i), Field.Store.NO));
        doc.add(new NumericDocValuesField("dv", i));
        doc.add(new BinaryDocValuesField("dv2", new BytesRef(Integer.toString(i))));
        doc.add(new SortedDocValuesField("dv3", new BytesRef(Integer.toString(i))));
        doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i))));
        doc.add(new SortedSetDocValuesField("dv4", new BytesRef(Integer.toString(i - 1))));
        doc.add(new SortedNumericDocValuesField("dv5", i));
        doc.add(new SortedNumericDocValuesField("dv5", i - 1));
        doc.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
        // ensure we store something
        doc.add(new StoredField("stored1", "foo"));
        doc.add(new StoredField("stored1", "bar"));
        // ensure we get some payloads
        doc.add(newTextField("text_payloads", TestUtil.randomAnalysisString(random(), 6, true), Field.Store.NO));
        // ensure we get some vectors
        FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
        ft.setStoreTermVectors(true);
        doc.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
        doc.add(new IntPoint("point", random().nextInt()));
        doc.add(new IntPoint("point2d", random().nextInt(), random().nextInt()));
        if (random().nextInt(10) > 0) {
          // single doc
          try {
            iw.addDocument(doc);
            // we made it, sometimes delete our doc, or update a dv
            int thingToDo = random().nextInt(4);
            if (thingToDo == 0) {
              iw.deleteDocuments(new Term("id", Integer.toString(i)));
            } else if (thingToDo == 1) {
              iw.updateNumericDocValue(new Term("id", Integer.toString(i)), "dv", i + 1L);
            } else if (thingToDo == 2) {
              iw.updateBinaryDocValue(new Term("id", Integer.toString(i)), "dv2", new BytesRef(Integer.toString(i + 1)));
            }
          } catch (VirtualMachineError | AlreadyClosedException disaster) {
            getTragedy(disaster, iw, exceptionStream);
            continue STARTOVER;
          }
        } else {
          // block docs
          Document doc2 = new Document();
          doc2.add(newStringField("id", Integer.toString(-i), Field.Store.NO));
          doc2.add(newTextField("text1", TestUtil.randomAnalysisString(random(), 20, true), Field.Store.NO));
          doc2.add(new StoredField("stored1", "foo"));
          doc2.add(new StoredField("stored1", "bar"));
          doc2.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
          try {
            iw.addDocuments(Arrays.asList(doc, doc2));
            // we made it, sometimes delete our docs
            if (random().nextBoolean()) {
              iw.deleteDocuments(new Term("id", Integer.toString(i)), new Term("id", Integer.toString(-i)));
            }
          } catch (VirtualMachineError | AlreadyClosedException disaster) {
            getTragedy(disaster, iw, exceptionStream);
            continue STARTOVER;
          }
        }
        if (random().nextInt(10) == 0) {
          // trigger flush:
          try {
            if (random().nextBoolean()) {
              DirectoryReader ir = null;
              try {
                ir = DirectoryReader.open(iw, random().nextBoolean(), false);
                TestUtil.checkReader(ir);
              } finally {
                IOUtils.closeWhileHandlingException(ir);
              }
            } else {
              iw.commit();
            }
            if (DirectoryReader.indexExists(dir)) {
              TestUtil.checkIndex(dir);
            }
          } catch (VirtualMachineError | AlreadyClosedException disaster) {
            getTragedy(disaster, iw, exceptionStream);
            continue STARTOVER;
          }
        }
      }
      try {
        iw.close();
      } catch (VirtualMachineError | AlreadyClosedException disaster) {
        getTragedy(disaster, iw, exceptionStream);
        continue STARTOVER;
      }
    } catch (Throwable t) {
      System.out.println("Unexpected exception: dumping fake-exception-log:...");
      exceptionStream.flush();
      System.out.println(exceptionLog.toString("UTF-8"));
      System.out.flush();
      Rethrow.rethrow(t);
    }
  }
  dir.close();
  if (VERBOSE) {
    System.out.println("TEST PASSED: dumping fake-exception-log:...");
    System.out.println(exceptionLog.toString("UTF-8"));
  }
}
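The getTragedy(...) helper belongs to this test class and is not shown here. The contract it leans on is that IndexWriter treats a VirtualMachineError as a "tragic" event: the writer closes itself, subsequent calls throw AlreadyClosedException, and IndexWriter.getTragicException() reports the original cause. Below is a hedged sketch of what such a helper plausibly does; the logging details are assumptions, not the actual lucene-solr code.

import java.io.PrintStream;
import org.apache.lucene.index.IndexWriter;

class TragedyLogging {
  // Log the disaster and check whether the writer recorded it as its tragic
  // exception; the caller then discards the writer and starts over (the
  // STARTOVER loop above).
  static void logTragedy(Throwable disaster, IndexWriter writer, PrintStream log) {
    log.println("writer hit a disaster: " + disaster);
    disaster.printStackTrace(log);
    Throwable tragedy = writer.getTragicException(); // null if the writer is still healthy
    if (tragedy != null) {
      log.println("writer recorded tragic exception: " + tragedy);
    }
  }
}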
Use of org.apache.lucene.document.SortedDocValuesField in project jackrabbit-oak by apache.
The class LuceneDocumentMaker, method addTypedOrderedFields.
private boolean addTypedOrderedFields(List<Field> fields, PropertyState property, String pname, PropertyDefinition pd) {
  // Ignore and warn if the property is multi-valued, as that is not supported
  if (property.getType().isArray()) {
    log.warn("[{}] Ignoring ordered property {} of type {} for path {} as multivalued ordered property not supported", getIndexName(), pname, Type.fromTag(property.getType().tag(), true), path);
    return false;
  }
  int tag = property.getType().tag();
  int idxDefinedTag = pd.getType();
  // Try converting the type to the defined type in the index definition
  if (tag != idxDefinedTag) {
    log.debug("[{}] Ordered property defined with type {} differs from property {} with type {} in path {}", getIndexName(), Type.fromTag(idxDefinedTag, false), property.toString(), Type.fromTag(tag, false), path);
    tag = idxDefinedTag;
  }
  String name = FieldNames.createDocValFieldName(pname);
  boolean fieldAdded = false;
  Field f = null;
  try {
    if (tag == Type.LONG.tag()) {
      //TODO Distinguish fields which need to be used for search and for sort
      //If a field is only used for sort then it can be stored with less precision
      f = new NumericDocValuesField(name, property.getValue(Type.LONG));
    } else if (tag == Type.DATE.tag()) {
      String date = property.getValue(Type.DATE);
      f = new NumericDocValuesField(name, FieldFactory.dateToLong(date));
    } else if (tag == Type.DOUBLE.tag()) {
      f = new DoubleDocValuesField(name, property.getValue(Type.DOUBLE));
    } else if (tag == Type.BOOLEAN.tag()) {
      f = new SortedDocValuesField(name, new BytesRef(property.getValue(Type.BOOLEAN).toString()));
    } else if (tag == Type.STRING.tag()) {
      f = new SortedDocValuesField(name, new BytesRef(property.getValue(Type.STRING)));
    }
    if (f != null) {
      fields.add(f);
      fieldAdded = true;
    }
  } catch (Exception e) {
    log.warn("[{}] Ignoring ordered property. Could not convert property {} of type {} to type {} for path {}", getIndexName(), pname, Type.fromTag(property.getType().tag(), false), Type.fromTag(tag, false), path, e);
  }
  return fieldAdded;
}
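Stripped of the Oak property types, the mapping above boils down to: numeric kinds go into numeric doc values fields (sorted numerically), while booleans and strings go into SortedDocValuesField (sorted lexicographically as UTF-8 bytes). A minimal sketch of that core decision, with hypothetical names and plain Java types standing in for Oak's PropertyState:

import org.apache.lucene.document.DoubleDocValuesField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.util.BytesRef;

class OrderedFieldFactory {
  // Pick a doc values field type that sorts the way the value's type demands.
  static Field orderedField(String name, Object value) {
    if (value instanceof Long) {
      return new NumericDocValuesField(name, (Long) value); // numeric sort
    }
    if (value instanceof Double) {
      return new DoubleDocValuesField(name, (Double) value); // numeric sort via sortable long encoding
    }
    // booleans and strings: lexicographic sort over UTF-8 bytes
    return new SortedDocValuesField(name, new BytesRef(value.toString()));
  }
}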
Use of org.apache.lucene.document.SortedDocValuesField in project lucene-solr by apache.
The class TestLucene54DocValuesFormat, method doTestSparseDocValuesVsStoredFields.
private void doTestSparseDocValuesVsStoredFields() throws Exception {
  final long[] values = new long[TestUtil.nextInt(random(), 1, 500)];
  for (int i = 0; i < values.length; ++i) {
    values[i] = random().nextLong();
  }
  Directory dir = newFSDirectory(createTempDir());
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMergeScheduler(new SerialMergeScheduler());
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
  // sparse compression is only enabled if less than 1% of docs have a value
  final int avgGap = 100;
  final int numDocs = atLeast(200);
  for (int i = random().nextInt(avgGap * 2); i >= 0; --i) {
    writer.addDocument(new Document());
  }
  final int maxNumValuesPerDoc = random().nextBoolean() ? 1 : TestUtil.nextInt(random(), 2, 5);
  for (int i = 0; i < numDocs; ++i) {
    Document doc = new Document();
    // single-valued
    long docValue = values[random().nextInt(values.length)];
    doc.add(new NumericDocValuesField("numeric", docValue));
    doc.add(new SortedDocValuesField("sorted", new BytesRef(Long.toString(docValue))));
    doc.add(new BinaryDocValuesField("binary", new BytesRef(Long.toString(docValue))));
    doc.add(new StoredField("value", docValue));
    // multi-valued
    final int numValues = TestUtil.nextInt(random(), 1, maxNumValuesPerDoc);
    for (int j = 0; j < numValues; ++j) {
      docValue = values[random().nextInt(values.length)];
      doc.add(new SortedNumericDocValuesField("sorted_numeric", docValue));
      doc.add(new SortedSetDocValuesField("sorted_set", new BytesRef(Long.toString(docValue))));
      doc.add(new StoredField("values", docValue));
    }
    writer.addDocument(doc);
    // add a gap
    for (int j = TestUtil.nextInt(random(), 0, avgGap * 2); j >= 0; --j) {
      writer.addDocument(new Document());
    }
  }
  if (random().nextBoolean()) {
    writer.forceMerge(1);
  }
  final IndexReader indexReader = writer.getReader();
  TestUtil.checkReader(indexReader);
  writer.close();
  for (LeafReaderContext context : indexReader.leaves()) {
    final LeafReader reader = context.reader();
    final NumericDocValues numeric = DocValues.getNumeric(reader, "numeric");
    final SortedDocValues sorted = DocValues.getSorted(reader, "sorted");
    final BinaryDocValues binary = DocValues.getBinary(reader, "binary");
    final SortedNumericDocValues sortedNumeric = DocValues.getSortedNumeric(reader, "sorted_numeric");
    final SortedSetDocValues sortedSet = DocValues.getSortedSet(reader, "sorted_set");
    for (int i = 0; i < reader.maxDoc(); ++i) {
      final Document doc = reader.document(i);
      final IndexableField valueField = doc.getField("value");
      final Long value = valueField == null ? null : valueField.numericValue().longValue();
      if (value == null) {
        assertTrue(numeric.docID() + " vs " + i, numeric.docID() < i);
      } else {
        assertEquals(i, numeric.nextDoc());
        assertEquals(i, binary.nextDoc());
        assertEquals(i, sorted.nextDoc());
        assertEquals(value.longValue(), numeric.longValue());
        assertTrue(sorted.ordValue() >= 0);
        assertEquals(new BytesRef(Long.toString(value)), sorted.lookupOrd(sorted.ordValue()));
        assertEquals(new BytesRef(Long.toString(value)), binary.binaryValue());
      }
      final IndexableField[] valuesFields = doc.getFields("values");
      if (valuesFields.length == 0) {
        assertTrue(sortedNumeric.docID() + " vs " + i, sortedNumeric.docID() < i);
      } else {
        final Set<Long> valueSet = new HashSet<>();
        for (IndexableField sf : valuesFields) {
          valueSet.add(sf.numericValue().longValue());
        }
        assertEquals(i, sortedNumeric.nextDoc());
        assertEquals(valuesFields.length, sortedNumeric.docValueCount());
        for (int j = 0; j < sortedNumeric.docValueCount(); ++j) {
          assertTrue(valueSet.contains(sortedNumeric.nextValue()));
        }
        assertEquals(i, sortedSet.nextDoc());
        int sortedSetCount = 0;
        while (true) {
          long ord = sortedSet.nextOrd();
          if (ord == SortedSetDocValues.NO_MORE_ORDS) {
            break;
          }
          assertTrue(valueSet.contains(Long.parseLong(sortedSet.lookupOrd(ord).utf8ToString())));
          sortedSetCount++;
        }
        assertEquals(valueSet.size(), sortedSetCount);
      }
    }
  }
  indexReader.close();
  dir.close();
}
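The verification loop depends on the iterator protocol that Lucene 7 introduced for doc values: the fields are sparse by construction here, so nextDoc() skips documents without a value, and docID() reports where the iterator currently sits (which is what the docID() < i assertions check). A minimal sketch of that consumption pattern; the helper name is illustrative.

import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.DocIdSetIterator;

class SparseDocValuesScan {
  // Print every (docID, value) pair for a sparse SortedDocValues field;
  // documents with no value are skipped automatically by nextDoc().
  static void printAll(LeafReader reader, String field) throws IOException {
    SortedDocValues dv = reader.getSortedDocValues(field);
    if (dv == null) {
      return; // no document in this segment has the field
    }
    for (int doc = dv.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dv.nextDoc()) {
      System.out.println(doc + " -> " + dv.binaryValue().utf8ToString());
    }
  }
}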