Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
From the class TestFiltering, method testRandomFiltering:
@Test
public void testRandomFiltering() throws Exception {
  int indexIter = 5 * RANDOM_MULTIPLIER;
  int queryIter = 250 * RANDOM_MULTIPLIER;
  Model model = new Model();
  for (int iiter = 0; iiter < indexIter; iiter++) {
    model.indexSize = random().nextInt(40 * RANDOM_MULTIPLIER) + 1;
    clearIndex();
    for (int i = 0; i < model.indexSize; i++) {
      String val = Integer.toString(i);
      SolrInputDocument doc = sdoc("id", val, f, val, f_s, f_s(i));
      updateJ(jsonAdd(doc), null);
      if (random().nextInt(100) < 20) {
        // duplicate doc 20% of the time (makes deletions)
        updateJ(jsonAdd(doc), null);
      }
      if (random().nextInt(100) < 10) {
        // commit 10% of the time (forces a new segment)
        assertU(commit());
      }
    }
    assertU(commit());
    // sanity check
    assertJQ(req("q", "*:*"), "/response/numFound==" + model.indexSize);

    int totalMatches = 0;
    int nonZeros = 0;
    for (int qiter = 0; qiter < queryIter; qiter++) {
      model.clear();
      List<String> params = new ArrayList<>();
      params.add("q");
      params.add(makeRandomQuery(model, true, false));
      int nFilters = random().nextInt(5);
      for (int i = 0; i < nFilters; i++) {
        params.add("fq");
        params.add(makeRandomQuery(model, false, false));
      }
      boolean facet = random().nextBoolean();
      if (facet) {
        // basic facet.query tests getDocListAndSet
        params.add("facet");
        params.add("true");
        params.add("facet.query");
        params.add("*:*");
        params.add("facet.query");
        params.add("{!key=multiSelect ex=t}*:*");
        String facetQuery = makeRandomQuery(model, false, true);
        if (facetQuery.startsWith("{!")) {
          facetQuery = "{!key=facetQuery " + facetQuery.substring(2);
        } else {
          facetQuery = "{!key=facetQuery}" + facetQuery;
        }
        params.add("facet.query");
        params.add(facetQuery);
      }
      if (random().nextInt(100) < 10) {
        params.add("group");
        params.add("true");
        params.add("group.main");
        params.add("true");
        params.add("group.field");
        params.add("id");
        if (random().nextBoolean()) {
          params.add("group.cache.percent");
          params.add("100");
        }
      }
      SolrQueryRequest sreq = req(params.toArray(new String[params.size()]));
      long expected = model.answer.cardinality();
      long expectedMultiSelect = model.multiSelect.cardinality();
      long expectedFacetQuery = model.facetQuery.cardinality();
      totalMatches += expected;
      if (expected > 0) {
        nonZeros++;
      }
      if (iiter == -1 && qiter == -1) {
        // set a breakpoint here to debug a specific issue
        System.out.println("request=" + params);
      }
      try {
        assertJQ(sreq,
            "/response/numFound==" + expected,
            facet ? "/facet_counts/facet_queries/*:*/==" + expected : null,
            facet ? "/facet_counts/facet_queries/multiSelect/==" + expectedMultiSelect : null,
            facet ? "/facet_counts/facet_queries/facetQuery/==" + expectedFacetQuery : null);
      } catch (Exception e) {
        // log the indexSize, iiter and qiter for easier debugging
        SolrException.log(log, e);
        String s = "FAILURE: indexSize=" + model.indexSize + " iiter=" + iiter
            + " qiter=" + qiter + " request=" + params;
        log.error(s);
        fail(s);
      }
    }
    // After making substantial changes to this test, make sure that we still get a
    // decent number of queries that match some documents:
    // System.out.println("totalMatches=" + totalMatches + " nonZeroQueries=" + nonZeros);
  }
}
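For context, the sdoc(...) helper used above is a SolrTestCaseJ4 convenience that builds a SolrInputDocument from alternating field-name/value arguments. Below is a minimal sketch of that idiom, assuming only solr-solrj on the classpath; the class and the local sdoc helper are illustrative stand-ins, not the test framework's actual code:

import org.apache.solr.common.SolrInputDocument;

public class SdocSketch {
  // Stand-in for the test helper: builds a document from alternating
  // field-name/value pairs, mirroring sdoc("id", val, f, val, f_s, f_s(i)).
  static SolrInputDocument sdoc(Object... fieldsAndValues) {
    SolrInputDocument doc = new SolrInputDocument();
    for (int i = 0; i < fieldsAndValues.length; i += 2) {
      doc.addField((String) fieldsAndValues[i], fieldsAndValues[i + 1]);
    }
    return doc;
  }

  public static void main(String[] args) {
    SolrInputDocument doc = sdoc("id", "42", "val_i", 42);
    // prints the field names and values, e.g. SolrInputDocument(fields: [id=42, val_i=42])
    System.out.println(doc);
  }
}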
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
From the class TestOverriddenPrefixQueryForCustomFieldType, method createIndex:
public void createIndex(int nDocs) {
  Random r = random();
  for (int i = 0; i < nDocs; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "" + i);
    int t = r.nextInt(1000);
    if (t % 3 == 0) {
      doc.addField("swap_foo_bar_in_prefix_query", "foo" + i);
      counts[0]++;
    } else if (t % 3 == 1) {
      doc.addField("swap_foo_bar_in_prefix_query", "foo" + i);
      doc.addField("swap_foo_bar_in_prefix_query", "spam" + i);
      otherCounts++;
      counts[0]++;
    } else {
      doc.addField("swap_foo_bar_in_prefix_query", "bar" + i);
      counts[1]++;
    }
    // add noise fields to every doc
    doc.addField("int_prefix_as_range", i);
    doc.addField("intfield", i);
    assertU(adoc(doc));
  }
  assertU(commit());
}
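Note the t % 3 == 1 branch above: calling addField twice with the same field name appends a second value rather than replacing the first, which is what makes swap_foo_bar_in_prefix_query multi-valued for those documents. A standalone sketch of that behavior (class name hypothetical):

import org.apache.solr.common.SolrInputDocument;

public class MultiValuedFieldSketch {
  public static void main(String[] args) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "7");
    // two addField calls on the same name accumulate values
    doc.addField("swap_foo_bar_in_prefix_query", "foo7");
    doc.addField("swap_foo_bar_in_prefix_query", "spam7");
    // prints [foo7, spam7]
    System.out.println(doc.getFieldValues("swap_foo_bar_in_prefix_query"));
  }
}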
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
From the class AddBlockUpdateTest, method testXML:
// This is the same as testSolrJXML above but uses the XMLLoader
// to illustrate the structure of the XML documents.
@Test
public void testXML() throws IOException, XMLStreamException {
  UpdateRequest req = new UpdateRequest();
  List<SolrInputDocument> docs = new ArrayList<>();
  String xml_doc1 = "<doc >"
      + " <field name=\"id\">1</field>"
      + " <field name=\"parent_s\">X</field>"
      + "<doc> "
      + " <field name=\"id\" >2</field>"
      + " <field name=\"child_s\">y</field>"
      + "</doc>"
      + "<doc> "
      + " <field name=\"id\" >3</field>"
      + " <field name=\"child_s\">z</field>"
      + "</doc>"
      + "</doc>";
  String xml_doc2 = "<doc >"
      + " <field name=\"id\">4</field>"
      + " <field name=\"parent_s\">A</field>"
      + "<doc> "
      + " <field name=\"id\" >5</field>"
      + " <field name=\"child_s\">b</field>"
      + "</doc>"
      + "<doc> "
      + " <field name=\"id\" >6</field>"
      + " <field name=\"child_s\">c</field>"
      + "</doc>"
      + "</doc>";

  XMLStreamReader parser = inputFactory.createXMLStreamReader(new StringReader(xml_doc1));
  // read the START document...
  parser.next();
  // null for the processor is all right here
  XMLLoader loader = new XMLLoader();
  SolrInputDocument document1 = loader.readDoc(parser);

  XMLStreamReader parser2 = inputFactory.createXMLStreamReader(new StringReader(xml_doc2));
  // read the START document...
  parser2.next();
  SolrInputDocument document2 = loader.readDoc(parser2);

  docs.add(document1);
  docs.add(document2);
  Collections.shuffle(docs, random());
  req.add(docs);

  RequestWriter requestWriter = new RequestWriter();
  OutputStream os = new ByteArrayOutputStream();
  requestWriter.write(req, os);
  assertBlockU(os.toString());
  assertU(commit());

  final SolrIndexSearcher searcher = getSearcher();
  assertSingleParentOf(searcher, one("yz"), "X");
  assertSingleParentOf(searcher, one("bc"), "A");
}
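The same parent/child block can also be assembled programmatically: addChildDocument nests one SolrInputDocument inside another, mirroring the inner <doc> elements of xml_doc1. A minimal sketch (class name hypothetical):

import org.apache.solr.common.SolrInputDocument;

public class BlockDocSketch {
  public static void main(String[] args) {
    SolrInputDocument parent = new SolrInputDocument();
    parent.addField("id", "1");
    parent.addField("parent_s", "X");

    SolrInputDocument child = new SolrInputDocument();
    child.addField("id", "2");
    child.addField("child_s", "y");

    // nest the child inside the parent block, as the XML's inner <doc> does
    parent.addChildDocument(child);
    // prints true
    System.out.println(parent.hasChildDocuments());
  }
}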
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
From the class DocumentBuilderTest, method testMoveLargestLast:
public void testMoveLargestLast() {
  SolrInputDocument inDoc = new SolrInputDocument();
  // not stored, so it won't be moved; this value is the longest, however
  String TEXT_FLD = "text";
  inDoc.addField(TEXT_FLD, "NOT STORED|"
      + RandomStrings.randomAsciiOfLength(random(), 4 * DocumentBuilder.MIN_LENGTH_TO_MOVE_LAST));
  // stored, multiValued
  String CAT_FLD = "cat";
  inDoc.addField(CAT_FLD, "STORED V1|");
  // pretty long value
  inDoc.addField(CAT_FLD, "STORED V2|"
      + RandomStrings.randomAsciiOfLength(random(), 2 * DocumentBuilder.MIN_LENGTH_TO_MOVE_LAST));
  inDoc.addField(CAT_FLD, "STORED V3|"
      + RandomStrings.randomAsciiOfLength(random(), DocumentBuilder.MIN_LENGTH_TO_MOVE_LAST));
  // stored; this value is long, but not long enough
  String SUBJECT_FLD = "subject";
  inDoc.addField(SUBJECT_FLD, "2ndplace|"
      + RandomStrings.randomAsciiOfLength(random(), DocumentBuilder.MIN_LENGTH_TO_MOVE_LAST));

  Document outDoc = DocumentBuilder.toDocument(inDoc, h.getCore().getLatestSchema());

  // filter outDoc by stored fields; convert to a list
  List<IndexableField> storedFields = StreamSupport.stream(outDoc.spliterator(), false)
      .filter(f -> f.fieldType().stored())
      .collect(Collectors.toList());
  // clip to the last 3; we expect these to be for CAT_FLD
  storedFields = storedFields.subList(storedFields.size() - 3, storedFields.size());

  Iterator<IndexableField> fieldIterator = storedFields.iterator();
  IndexableField field;

  // Test that we retained the particular value ordering, even though the 2nd of three was longest
  assertTrue(fieldIterator.hasNext());
  field = fieldIterator.next();
  assertEquals(CAT_FLD, field.name());
  assertTrue(field.stringValue().startsWith("STORED V1|"));

  assertTrue(fieldIterator.hasNext());
  field = fieldIterator.next();
  assertEquals(CAT_FLD, field.name());
  assertTrue(field.stringValue().startsWith("STORED V2|"));

  assertTrue(fieldIterator.hasNext());
  field = fieldIterator.next();
  assertEquals(CAT_FLD, field.name());
  assertTrue(field.stringValue().startsWith("STORED V3|"));
}
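The stream/filter step works because Lucene's Document is Iterable<IndexableField>, so spliterator() comes for free from Iterable. A self-contained sketch of isolating stored fields on a plain Document, assuming lucene-core on the classpath (class name hypothetical):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexableField;

public class StoredFieldFilterSketch {
  public static void main(String[] args) {
    Document doc = new Document();
    doc.add(new StringField("id", "1", Field.Store.NO)); // indexed, not stored
    doc.add(new StoredField("cat", "STORED V1|"));       // stored only

    // same idiom as the test: stream the fields and keep the stored ones
    List<IndexableField> stored = StreamSupport.stream(doc.spliterator(), false)
        .filter(f -> f.fieldType().stored())
        .collect(Collectors.toList());
    // prints 1 (only the StoredField survives the filter)
    System.out.println(stored.size());
  }
}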
Use of org.apache.solr.common.SolrInputDocument in project lucene-solr by apache.
From the class DocumentBuilderTest, method testSolrInputDocumentEquality:
public void testSolrInputDocumentEquality() {
  String randomString = TestUtil.randomSimpleString(random());
  SolrInputDocument doc1 = new SolrInputDocument();
  doc1.addField("foo", randomString);
  SolrInputDocument doc2 = new SolrInputDocument();
  doc2.addField("foo", randomString);
  assertTrue(compareSolrInputDocument(doc1, doc2));

  doc1 = new SolrInputDocument();
  doc1.addField("foo", randomString);
  doc2 = new SolrInputDocument();
  doc2.addField("foo", randomString);

  SolrInputDocument childDoc = new SolrInputDocument();
  childDoc.addField("foo", "bar");
  // a child on doc1 only makes the documents unequal
  doc1.addChildDocument(childDoc);
  assertFalse(compareSolrInputDocument(doc1, doc2));
  // adding the same child to doc2 restores equality
  doc2.addChildDocument(childDoc);
  assertTrue(compareSolrInputDocument(doc1, doc2));

  // an extra child on doc2 makes them unequal again
  // (the original called addField on childDoc here, which mutates the child
  // shared by both docs; populating the new childDoc1 is the apparent intent)
  SolrInputDocument childDoc1 = new SolrInputDocument();
  childDoc1.addField(TestUtil.randomSimpleString(random()), TestUtil.randomSimpleString(random()));
  doc2.addChildDocument(childDoc1);
  assertFalse(compareSolrInputDocument(doc1, doc2));
}
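A related pitfall: addChildDocument stores a reference, so a child shared between two parents (as childDoc is above) is mutated in both places. When independent copies are needed, SolrInputDocument.deepCopy() clones the fields and child documents. A minimal sketch (class name hypothetical):

import org.apache.solr.common.SolrInputDocument;

public class DeepCopySketch {
  public static void main(String[] args) {
    SolrInputDocument child = new SolrInputDocument();
    child.addField("foo", "bar");

    SolrInputDocument doc1 = new SolrInputDocument();
    SolrInputDocument doc2 = new SolrInputDocument();
    doc1.addChildDocument(child);            // shared reference
    doc2.addChildDocument(child.deepCopy()); // independent copy

    child.addField("extra", "value");
    // prints true: doc1 sees the mutation through the shared reference
    System.out.println(doc1.getChildDocuments().get(0).containsKey("extra"));
    // prints false: the deep copy is unaffected
    System.out.println(doc2.getChildDocuments().get(0).containsKey("extra"));
  }
}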