Use of org.apache.solr.common.params.ModifiableSolrParams in the Apache lucene-solr project.
From the class TestMaxScoreQueryParser, method parse.
//
// Helper methods
//
/**
 * Parses {@code q} with {@link MaxScoreQParser}, turning the trailing varargs into
 * request parameters ({@code name, value} pairs).  A default field of "text" is
 * supplied via the params argument.  Any parse failure aborts the test via fail().
 */
private Query parse(String q, String... params) {
  try {
    // Pack the varargs into (name, value) pairs; a trailing unpaired entry is ignored.
    ModifiableSolrParams localParams = new ModifiableSolrParams();
    for (int i = 0; i + 1 < params.length; i += 2) {
      localParams.add(params[i], params[i + 1]);
    }
    return new MaxScoreQParser(q, localParams, new MapSolrParams(Collections.singletonMap("df", "text")), req(q)).parse();
  } catch (SyntaxError syntaxError) {
    fail("Failed with exception " + syntaxError.getMessage());
  }
  // Unreachable in practice (fail() throws AssertionError) but required by the compiler.
  fail("Parse failed");
  return null;
}
Use of org.apache.solr.common.params.ModifiableSolrParams in the Apache lucene-solr project.
From the class TestPseudoReturnFields, method testAugmentersGlobsExplicitAndScoreOhMy.
/**
 * Requests a shuffled mix of explicit fields, field globs ("val_*", "subj*"),
 * doc transformers ([docid], [explain]) and score, and verifies each returned
 * doc carries exactly the expected six entries.  The same field list is sent
 * both as one comma-joined fl value and as repeated fl parameters.
 */
public void testAugmentersGlobsExplicitAndScoreOhMy() throws Exception {
  final Random rand = random();
  // NOTE: 'ssto' is the missing one
  final List<String> fieldList = Arrays.asList("id", "[docid]", "[explain]", "score", "val_*", "subj*");
  final int rounds = atLeast(rand, 10);
  for (int round = 0; round < rounds; round++) {
    // Shuffle so fl ordering cannot matter for the outcome.
    Collections.shuffle(fieldList, rand);
    final SolrParams singleFl = params("q", "*:*", "rows", "1", "fl", String.join(",", fieldList));
    final ModifiableSolrParams multiFl = params("q", "*:*", "rows", "1");
    fieldList.forEach(f -> multiFl.add("fl", f));
    // Both encodings of the field list must produce identical results.
    for (SolrParams p : Arrays.asList(singleFl, multiFl)) {
      assertQ(p.toString(), req(p), "//result[@numFound='5']", "//result/doc/str[@name='id']", "//result/doc/float[@name='score']", "//result/doc/str[@name='subject']", "//result/doc/int[@name='val_i']", "//result/doc/int[@name='[docid]']", "//result/doc/str[@name='[explain]']", "//result/doc[count(*)=6]");
    }
  }
}
Use of org.apache.solr.common.params.ModifiableSolrParams in the Apache lucene-solr project.
From the class TestPseudoReturnFields, method testAugmentersGlobsExplicitAndScoreOhMyRTG.
/**
 * Real-time-get variant of the globs/explicit/score test: fetches single docs
 * via /get with a shuffled fl list (sent both comma-joined and as repeated fl
 * params) and verifies the expected fields, for both a committed and an
 * uncommitted doc id.
 */
public void testAugmentersGlobsExplicitAndScoreOhMyRTG() throws Exception {
  final Random rand = random();
  // NOTE: 'ssto' is the missing one
  final List<String> fieldList = Arrays.asList("id", "[explain]", "score", "val_*", "subj*", "abs(val_i)", "[docid]");
  final int rounds = atLeast(rand, 10);
  for (int round = 0; round < rounds; round++) {
    Collections.shuffle(fieldList, rand);
    final SolrParams singleFl = params("fl", String.join(",", fieldList));
    final ModifiableSolrParams multiFl = params();
    fieldList.forEach(f -> multiFl.add("fl", f));
    // RTG behavior should be consistent, (committed or otherwise)
    for (String id : Arrays.asList("42", "99")) {
      for (SolrParams p : Arrays.asList(singleFl, multiFl)) {
        assertQ(id + ": " + p, req(p, "qt", "/get", "id", id, "wt", "xml"), "count(//doc)=1", "//doc/str[@name='id']", "//doc/int[@name='[docid]'][.>=-1]", "//doc/float[@name='abs(val_i)'][.='1.0']", // RTG: [explain] and score should be missing (ignored)
        "//doc/int[@name='val_i'][.=1]", "//doc/str[@name='subject']", "//doc[count(*)=5]");
      }
    }
  }
}
Use of org.apache.solr.common.params.ModifiableSolrParams in the Apache lucene-solr project.
From the class TestCollapseQParserPlugin, method testEmptyCollection.
/**
 * Collapsing on an empty collection must return zero docs rather than fail,
 * for both docValues variants of the group field and for both collector paths.
 */
@Test
public void testEmptyCollection() throws Exception {
  // group_s has docValues=false, group_s_dv has docValues=true
  final String groupField = random().nextBoolean() ? "group_s" : "group_s_dv";
  // Empty clause -> CollapsingScoreCollector; min/max -> CollapsingFieldValueCollector.
  final String minOrMax;
  if (random().nextBoolean()) {
    minOrMax = "";
  } else {
    minOrMax = random().nextBoolean() ? "min=field(test_i)" : "max=field(test_i)";
  }
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.add("q", "*:*");
  params.add("fq", "{!collapse field=" + groupField + " " + minOrMax + "}");
  assertQ(req(params), "*[count(//doc)=0]");
}
Use of org.apache.solr.common.params.ModifiableSolrParams in the Apache lucene-solr project.
From the class PeerSyncTest, method test.
/**
 * End-to-end exercise of PeerSync between replicas.  Throughout, client0 is the
 * source of updates (sent with leader-seen params) and client1 is the peer that
 * syncs from it via assertSync(); validateDocs() cross-checks both clients after
 * each phase.  Phases: basic add/sync, window-overlap limits, delete and
 * delete-by-query propagation, reordered-update handling, fingerprint mismatch
 * detection, in-place updates, and reordered DBQs with child documents
 * (SOLR-10114).  NOTE(review): client2 is fetched but not used in this method —
 * presumably kept for cluster setup; confirm before removing.
 */
@Test
@ShardsFixed(num = 3)
public void test() throws Exception {
// Every doc id expected to exist on both clients at the next validateDocs() call.
Set<Integer> docsAdded = new LinkedHashSet<>();
// SKIPVAL: exclude volatile fields from response comparison.
handle.clear();
handle.put("timestamp", SKIPVAL);
handle.put("score", SKIPVAL);
handle.put("maxScore", SKIPVAL);
SolrClient client0 = clients.get(0);
SolrClient client1 = clients.get(1);
SolrClient client2 = clients.get(2);
// v is the monotonically increasing _version_ assigned to updates below.
long v = 0;
add(client0, seenLeader, sdoc("id", "1", "_version_", ++v));
// this fails because client0 has no context (i.e. no updates of its own to judge if applying the updates
// from client1 will bring it into sync with client1)
assertSync(client1, numVersions, false, shardsArr[0]);
// bring client1 back into sync with client0 by adding the doc
add(client1, seenLeader, sdoc("id", "1", "_version_", v));
// both have the same version list, so sync should now return true
assertSync(client1, numVersions, true, shardsArr[0]);
// TODO: test that updates weren't necessary
client0.commit();
client1.commit();
queryAndCompare(params("q", "*:*"), client0, client1);
add(client0, seenLeader, addRandFields(sdoc("id", "2", "_version_", ++v)));
// now client1 has the context to sync
assertSync(client1, numVersions, true, shardsArr[0]);
client0.commit();
client1.commit();
queryAndCompare(params("q", "*:*"), client0, client1);
// Add a batch of docs only to client0; sync must pull all of them to client1.
add(client0, seenLeader, addRandFields(sdoc("id", "3", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "4", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "5", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "6", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "7", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "8", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "9", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "10", "_version_", ++v)));
for (int i = 0; i < 10; i++) docsAdded.add(i + 1);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// Flood client0 with ~95% of the version window of docs client1 never saw.
int toAdd = (int) (numVersions * .95);
for (int i = 0; i < toAdd; i++) {
add(client0, seenLeader, sdoc("id", Integer.toString(i + 11), "_version_", v + i + 1));
docsAdded.add(i + 11);
}
// sync should fail since there's not enough overlap to give us confidence
assertSync(client1, numVersions, false, shardsArr[0]);
// add some of the docs that were missing... just enough to give enough overlap
int toAdd2 = (int) (numVersions * .25);
for (int i = 0; i < toAdd2; i++) {
add(client1, seenLeader, sdoc("id", Integer.toString(i + 11), "_version_", v + i + 1));
}
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// test delete and deleteByQuery
// Deletes use a negative _version_ value (hence Long.toString(-++v) below).
v = 1000;
SolrInputDocument doc = sdoc("id", "1000", "_version_", ++v);
add(client0, seenLeader, doc);
add(client0, seenLeader, sdoc("id", "1001", "_version_", ++v));
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:1001 OR id:1002");
add(client0, seenLeader, sdoc("id", "1002", "_version_", ++v));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "1000");
// 1002 added
docsAdded.add(1002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// test that delete by query is returned even if not requested, and that it doesn't delete newer stuff than it should
v = 2000;
SolrClient client = client0;
add(client, seenLeader, sdoc("id", "2000", "_version_", ++v));
add(client, seenLeader, sdoc("id", "2001", "_version_", ++v));
delQ(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:2001 OR id:2002");
add(client, seenLeader, sdoc("id", "2002", "_version_", ++v));
del(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "2000");
// 2002 added
docsAdded.add(2002);
// Replay the same version sequence on client1, but skip the add of 2001.
v = 2000;
client = client1;
add(client, seenLeader, sdoc("id", "2000", "_version_", ++v));
// pretend we missed the add of 2001. peersync should retrieve it, but should also retrieve any deleteByQuery objects after it
++v;
// add(client, seenLeader, sdoc("id","2001","_version_",++v));
delQ(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:2001 OR id:2002");
add(client, seenLeader, sdoc("id", "2002", "_version_", ++v));
del(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "2000");
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
//
// Test that handling reorders work when applying docs retrieved from peer
//
// this should cause us to retrieve the delete (but not the following add)
// the reorder in application shouldn't affect anything
add(client0, seenLeader, sdoc("id", "3000", "_version_", 3001));
add(client1, seenLeader, sdoc("id", "3000", "_version_", 3001));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3000"), "3000");
docsAdded.add(3000);
// this should cause us to retrieve an add tha was previously deleted
add(client0, seenLeader, sdoc("id", "3001", "_version_", 3003));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3001"), "3004");
del(client1, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3001"), "3004");
// this should cause us to retrieve an older add that was overwritten
add(client0, seenLeader, sdoc("id", "3002", "_version_", 3004));
add(client0, seenLeader, sdoc("id", "3002", "_version_", 3005));
add(client1, seenLeader, sdoc("id", "3002", "_version_", 3005));
// 3001 added
docsAdded.add(3001);
// 3002 added
docsAdded.add(3002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// now lets check fingerprinting causes appropriate fails
v = 4000;
add(client0, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
docsAdded.add(4000);
// Push more updates than the sync window holds, identical on both clients
// except that client1 never received doc 4000 above.
toAdd = numVersions + 10;
for (int i = 0; i < toAdd; i++) {
add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
add(client1, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
docsAdded.add((int) v + i + 1);
}
// client0 now has an additional add beyond our window and the fingerprint should cause this to fail
assertSync(client1, numVersions, false, shardsArr[0]);
// if we turn of fingerprinting, it should succeed
System.setProperty("solr.disableFingerprint", "true");
try {
assertSync(client1, numVersions, true, shardsArr[0]);
} finally {
System.clearProperty("solr.disableFingerprint");
}
// lets add the missing document and verify that order doesn't matter
add(client1, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
assertSync(client1, numVersions, true, shardsArr[0]);
// lets do some overwrites to ensure that repeated updates and maxDoc don't matter
for (int i = 0; i < 10; i++) {
add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
}
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// lets add some in-place updates
// full update
add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000));
docsAdded.add(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(0, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
// In-place updates carry the previous version so the receiver can apply the partial doc.
ModifiableSolrParams inPlaceParams = new ModifiableSolrParams(seenLeader);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5000");
// in-place update
add(client0, inPlaceParams, sdoc("id", "5000", "val_i_dvo", 1, "_version_", 5001));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(1, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
// interleave the in-place updates with a few deletes to other documents
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5002"), 4001);
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5003"), "id:4002");
docsAdded.remove(4001);
docsAdded.remove(4002);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5001");
// in-place update
add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 2, "_version_", 5004));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(2, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
// a DBQ with value
// current val is 2, so this should not delete anything
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5005"), "val_i_dvo:1");
assertSync(client1, numVersions, true, shardsArr[0]);
// full update
add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000));
docsAdded.add(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5004");
add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 3, "_version_", 5006));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(3, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
validateDocs(docsAdded, client0, client1);
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5007"), 5000);
docsAdded.remove(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// if doc with id=6000 is deleted, further in-place-updates should fail
// full update
add(client0, seenLeader, sdoc("id", "6000", "val_i_dvo", 6, "title", "mytitle", "_version_", 6000));
// current val is 6000, this will delete id=6000
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "6004"), "val_i_dvo:6");
assertSync(client1, numVersions, true, shardsArr[0]);
// An in-place update referencing a deleted doc must surface a SERVER_ERROR.
SolrException ex = expectThrows(SolrException.class, () -> {
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "6000");
add(client0, inPlaceParams, sdoc("id", 6000, "val_i_dvo", 6003, "_version_", 5007));
});
assertEquals(ex.toString(), SolrException.ErrorCode.SERVER_ERROR.code, ex.code());
assertThat(ex.getMessage(), containsString("Can't find document with id=6000"));
// Reordered DBQ with Child-nodes (SOLR-10114)
docsAdded.clear();
// Reordered full delete should not delete child-docs
// add with later version
add(client0, seenLeader, sdocWithChildren(7001, "7001", 2));
docsAdded.add(7001);
docsAdded.add(7001001);
docsAdded.add(7001002);
// reordered delete
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "7000"), "id:*");
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// Reordered DBQ should not affect update
// add with later version
add(client0, seenLeader, sdocWithChildren(8000, "8000", 5));
// not found, arrives earlier
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "8002"), "id:8500");
// update with two childs
add(client0, seenLeader, sdocWithChildren(8000, "8001", 2));
docsAdded.add(8000);
docsAdded.add(8000001);
docsAdded.add(8000002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
}
Aggregations