Usage example of org.apache.lucene.analysis.MockAnalyzer in the lucene-solr project (Apache): method testThreads2 of class BaseDocValuesFormatTestCase.
/** Tests dv against stored fields with threads (all types + missing) */
@Slow
public void testThreads2() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
// Reused field instances: each doc-values field is paired with a stored field
// carrying the same value, so reader threads can cross-check dv against stored.
Field idField = new StringField("id", "", Field.Store.NO);
Field storedBinField = new StoredField("storedBin", new byte[0]);
Field dvBinField = new BinaryDocValuesField("dvBin", new BytesRef());
Field dvSortedField = new SortedDocValuesField("dvSorted", new BytesRef());
Field storedNumericField = new StoredField("storedNum", "");
Field dvNumericField = new NumericDocValuesField("dvNum", 0);
// index some docs
// More than 1024 docs, presumably so sparse doc-values encodings are
// exercised too -- TODO confirm against the codec's sparsity threshold.
int numDocs = TestUtil.nextInt(random(), 1025, 2047);
for (int i = 0; i < numDocs; i++) {
idField.setStringValue(Integer.toString(i));
int length = TestUtil.nextInt(random(), 0, 8);
byte[] buffer = new byte[length];
random().nextBytes(buffer);
storedBinField.setBytesValue(buffer);
dvBinField.setBytesValue(buffer);
dvSortedField.setBytesValue(buffer);
long numericValue = random().nextLong();
storedNumericField.setStringValue(Long.toString(numericValue));
dvNumericField.setLongValue(numericValue);
Document doc = new Document();
doc.add(idField);
// Randomly omit the binary/sorted fields (~25% of docs) to cover missing
// values; note dvBin and dvSorted are always added (or omitted) together.
if (random().nextInt(4) > 0) {
doc.add(storedBinField);
doc.add(dvBinField);
doc.add(dvSortedField);
}
// Randomly omit the numeric pair as well.
if (random().nextInt(4) > 0) {
doc.add(storedNumericField);
doc.add(dvNumericField);
}
// 0-2 sorted-set values; TreeSet dedups and sorts them, so the stored
// fields are indexed in the same order the dv iterator returns ords.
int numSortedSetFields = random().nextInt(3);
Set<String> values = new TreeSet<>();
for (int j = 0; j < numSortedSetFields; j++) {
values.add(TestUtil.randomSimpleString(random()));
}
for (String v : values) {
doc.add(new SortedSetDocValuesField("dvSortedSet", new BytesRef(v)));
doc.add(new StoredField("storedSortedSet", v));
}
// 0-2 sorted-numeric values, same dedup/sort trick as above.
int numSortedNumericFields = random().nextInt(3);
Set<Long> numValues = new TreeSet<>();
for (int j = 0; j < numSortedNumericFields; j++) {
numValues.add(TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE));
}
for (Long l : numValues) {
doc.add(new SortedNumericDocValuesField("dvSortedNumeric", l));
doc.add(new StoredField("storedSortedNumeric", Long.toString(l)));
}
writer.addDocument(doc);
// Occasional commits produce a multi-segment index.
if (random().nextInt(31) == 0) {
writer.commit();
}
}
// delete some docs
int numDeletions = random().nextInt(numDocs / 10);
for (int i = 0; i < numDeletions; i++) {
int id = random().nextInt(numDocs);
writer.deleteDocuments(new Term("id", Integer.toString(id)));
}
writer.close();
// compare
final DirectoryReader ir = DirectoryReader.open(dir);
int numThreads = TestUtil.nextInt(random(), 2, 7);
Thread[] threads = new Thread[numThreads];
final CountDownLatch startingGun = new CountDownLatch(1);
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
startingGun.await();
// Each thread walks every leaf and every docid in increasing order,
// pulling its OWN doc-values iterators (iterators are not shared
// across threads) and comparing them against the stored fields.
for (LeafReaderContext context : ir.leaves()) {
LeafReader r = context.reader();
BinaryDocValues binaries = r.getBinaryDocValues("dvBin");
SortedDocValues sorted = r.getSortedDocValues("dvSorted");
NumericDocValues numerics = r.getNumericDocValues("dvNum");
SortedSetDocValues sortedSet = r.getSortedSetDocValues("dvSortedSet");
SortedNumericDocValues sortedNumeric = r.getSortedNumericDocValues("dvSortedNumeric");
for (int j = 0; j < r.maxDoc(); j++) {
BytesRef binaryValue = r.document(j).getBinaryValue("storedBin");
if (binaryValue != null) {
if (binaries != null) {
// Docids are visited in order, so nextDoc() must land exactly
// on j whenever the stored counterpart is present.
assertEquals(j, binaries.nextDoc());
BytesRef scratch = binaries.binaryValue();
assertEquals(binaryValue, scratch);
// dvSorted was indexed together with dvBin, so sorted is
// non-null whenever binaries is.
assertEquals(j, sorted.nextDoc());
scratch = sorted.binaryValue();
assertEquals(binaryValue, scratch);
}
}
String number = r.document(j).get("storedNum");
if (number != null) {
if (numerics != null) {
// advance(j) also works here since j only ever increases.
assertEquals(j, numerics.advance(j));
assertEquals(Long.parseLong(number), numerics.longValue());
}
}
String[] values = r.document(j).getValues("storedSortedSet");
if (values.length > 0) {
assertNotNull(sortedSet);
assertEquals(j, sortedSet.nextDoc());
// Stored values were indexed pre-sorted, matching ord order.
for (int k = 0; k < values.length; k++) {
long ord = sortedSet.nextOrd();
assertTrue(ord != SortedSetDocValues.NO_MORE_ORDS);
BytesRef value = sortedSet.lookupOrd(ord);
assertEquals(values[k], value.utf8ToString());
}
// The doc must have exactly as many ords as stored values.
assertEquals(SortedSetDocValues.NO_MORE_ORDS, sortedSet.nextOrd());
}
String[] numValues = r.document(j).getValues("storedSortedNumeric");
if (numValues.length > 0) {
assertNotNull(sortedNumeric);
assertEquals(j, sortedNumeric.nextDoc());
assertEquals(numValues.length, sortedNumeric.docValueCount());
for (int k = 0; k < numValues.length; k++) {
long v = sortedNumeric.nextValue();
assertEquals(numValues[k], Long.toString(v));
}
}
}
}
TestUtil.checkReader(ir);
} catch (Exception e) {
// Rethrow so the failure is not swallowed; presumably surfaced by
// the test framework's uncaught-exception handler -- TODO confirm.
throw new RuntimeException(e);
}
}
};
threads[i].start();
}
// Release all threads at once to maximize concurrent reader access.
startingGun.countDown();
for (Thread t : threads) {
t.join();
}
ir.close();
dir.close();
}
Usage example of org.apache.lucene.analysis.MockAnalyzer in the lucene-solr project (Apache): method testThreads3 of class BaseDocValuesFormatTestCase.
// Stress test: runs CheckIndex's doc-values verification on every segment
// from many threads concurrently, over an index with a random number of
// sorted-set, binary, and sorted-numeric doc-values fields.
@Slow
public void testThreads3() throws Exception {
Directory dir = newFSDirectory(createTempDir());
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
// 0-20 fields of each doc-values type.
int numSortedSets = random().nextInt(21);
int numBinaries = random().nextInt(21);
int numSortedNums = random().nextInt(21);
int numDocs = TestUtil.nextInt(random(), 2025, 2047);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
// Two values per multi-valued field per doc.
for (int j = 0; j < numSortedSets; j++) {
doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
}
for (int j = 0; j < numBinaries; j++) {
doc.add(new BinaryDocValuesField("b" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
}
for (int j = 0; j < numSortedNums; j++) {
doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
}
writer.addDocument(doc);
}
writer.close();
// now check with threads
// 10 rounds; each round opens a fresh reader and checks it from 4-10 threads.
for (int i = 0; i < 10; i++) {
final DirectoryReader r = DirectoryReader.open(dir);
final CountDownLatch startingGun = new CountDownLatch(1);
Thread[] threads = new Thread[TestUtil.nextInt(random(), 4, 10)];
for (int tid = 0; tid < threads.length; tid++) {
threads[tid] = new Thread() {
@Override
public void run() {
try {
// Per-thread infoStream buffer; CheckIndex output is discarded.
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
PrintStream infoStream = new PrintStream(bos, false, IOUtils.UTF_8);
startingGun.await();
for (LeafReaderContext leaf : r.leaves()) {
// assumes each leaf of a DirectoryReader opened from a
// Directory is a SegmentReader -- TODO confirm.
DocValuesStatus status = CheckIndex.testDocValues((SegmentReader) leaf.reader(), infoStream, true);
if (status.error != null) {
throw status.error;
}
}
} catch (Throwable e) {
throw new RuntimeException(e);
}
}
};
}
// All threads are constructed first, then started, then released together
// by the latch so they hit the reader at the same time.
for (int tid = 0; tid < threads.length; tid++) {
threads[tid].start();
}
startingGun.countDown();
for (int tid = 0; tid < threads.length; tid++) {
threads[tid].join();
}
r.close();
}
dir.close();
}
Usage example of org.apache.lucene.analysis.MockAnalyzer in the lucene-solr project (Apache): method testBytes of class BaseDocValuesFormatTestCase.
/**
 * Indexes a single document with a stored text field plus a binary
 * doc-values field, then verifies that term searches hit the document and
 * that the doc-values bytes can be read back for the hit.
 */
public void testBytes() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
  String text = "This is the text to be indexed. " + longTerm;
  doc.add(newTextField("fieldname", text, Field.Store.YES));
  doc.add(new BinaryDocValuesField("dv", new BytesRef("hello world")));
  iwriter.addDocument(doc);
  iwriter.close();
  // Now search the index:
  // read-only=true
  IndexReader ireader = DirectoryReader.open(directory);
  IndexSearcher isearcher = new IndexSearcher(ireader);
  assertEquals(1, isearcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
  Query query = new TermQuery(new Term("fieldname", "text"));
  TopDocs hits = isearcher.search(query, 1);
  assertEquals(1, hits.totalHits);
  // A single document was indexed, so there must be exactly one leaf.
  // Use a real test assertion instead of the Java `assert` statement,
  // which is silently skipped unless the JVM runs with -ea. Also hoisted
  // out of the loop below, since it is loop-invariant.
  assertEquals(1, ireader.leaves().size());
  // Iterate through the results:
  for (int i = 0; i < hits.scoreDocs.length; i++) {
    int hitDocID = hits.scoreDocs[i].doc;
    Document hitDoc = isearcher.doc(hitDocID);
    assertEquals(text, hitDoc.get("fieldname"));
    BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
    // advance() must land exactly on the hit's docid, since the dv field
    // was set on that document.
    assertEquals(hitDocID, dv.advance(hitDocID));
    assertEquals(new BytesRef("hello world"), dv.binaryValue());
  }
  ireader.close();
  directory.close();
}
Usage example of org.apache.lucene.analysis.MockAnalyzer in the lucene-solr project (Apache): method testSortedSetMergeAwayAllValuesLargeSegment of class BaseDocValuesFormatTestCase.
/**
 * Same as testSortedSetMergeAwayAllValues, but on a segment of more than
 * 1024 documents so the sparse doc-values encoding is exercised.
 */
public void testSortedSetMergeAwayAllValuesLargeSegment() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig(new MockAnalyzer(random()));
  config.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, config);
  // The only document carrying a sorted-set value; it gets deleted below.
  Document valueDoc = new Document();
  valueDoc.add(new StringField("id", "1", Field.Store.NO));
  valueDoc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
  w.addDocument(valueDoc);
  // Pad the segment with empty documents to push it past 1024 docs.
  final int padding = atLeast(1024);
  for (int i = 0; i < padding; ++i) {
    w.addDocument(new Document());
  }
  w.commit();
  // Remove the sole value-bearing document, then merge it away entirely.
  w.deleteDocuments(new Term("id", "1"));
  w.forceMerge(1);
  DirectoryReader reader = w.getReader();
  w.close();
  // After the merge no document has a value left, so the doc-values
  // iterator must be exhausted immediately.
  SortedSetDocValues values = getOnlyLeafReader(reader).getSortedSetDocValues("field");
  assertEquals(NO_MORE_DOCS, values.nextDoc());
  reader.close();
  dir.close();
}
Usage example of org.apache.lucene.analysis.MockAnalyzer in the lucene-solr project (Apache): method testBytesWithNewline of class BaseDocValuesFormatTestCase.
/**
 * Verifies that binary doc values round-trip bytes containing newline and
 * carriage-return characters unchanged.
 */
public void testBytesWithNewline() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory directory = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  conf.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("dv", new BytesRef("hello\nworld\r1")));
  iwriter.addDocument(doc);
  iwriter.close();
  // Now search the index:
  // read-only=true
  IndexReader ireader = DirectoryReader.open(directory);
  // A single document was indexed, so there must be exactly one leaf.
  // Use a real test assertion instead of the Java `assert` statement,
  // which is a no-op unless the JVM runs with -ea.
  assertEquals(1, ireader.leaves().size());
  BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
  // The single document has docid 0 and carries the dv field.
  assertEquals(0, dv.nextDoc());
  assertEquals(new BytesRef("hello\nworld\r1"), dv.binaryValue());
  ireader.close();
  directory.close();
}
Aggregations