Use of org.apache.solr.common.params.ModifiableSolrParams in project lucene-solr by apache.
From the class TestCollapseQParserPlugin, method testEmptyCollection.
@Test
public void testEmptyCollection() throws Exception {
// group_s is docValues=false and group_s_dv is docValues=true
String group = (random().nextBoolean() ? "group_s" : "group_s_dv");
// min-or-max is for CollapsingScoreCollector vs. CollapsingFieldValueCollector
String optional_min_or_max = (random().nextBoolean() ? "" : (random().nextBoolean() ? "min=field(test_i)" : "max=field(test_i)"));
ModifiableSolrParams params = new ModifiableSolrParams();
params.add("q", "*:*");
params.add("fq", "{!collapse field=" + group + " " + optional_min_or_max + "}");
assertQ(req(params), "*[count(//doc)=0]");
}
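For orientation, here is a small standalone sketch (not part of the test above) of how the same collapse filter could be issued through SolrJ; SolrQuery extends ModifiableSolrParams, so the parameters built above map onto it directly. The base URL, collection name, and the choice of group_s are assumptions for illustration only.
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

void collapseQueryExample() throws Exception {
  // assumed local Solr URL and collection name, for illustration only
  try (HttpSolrClient solr = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
    SolrQuery q = new SolrQuery("*:*");
    q.addFilterQuery("{!collapse field=group_s}");
    // on an empty collection this reports zero hits, matching the assertQ count above
    System.out.println("hits: " + solr.query(q).getResults().getNumFound());
  }
}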
Use of org.apache.solr.common.params.ModifiableSolrParams in project lucene-solr by apache.
From the class PeerSyncTest, method test.
@Test
@ShardsFixed(num = 3)
public void test() throws Exception {
Set<Integer> docsAdded = new LinkedHashSet<>();
handle.clear();
handle.put("timestamp", SKIPVAL);
handle.put("score", SKIPVAL);
handle.put("maxScore", SKIPVAL);
SolrClient client0 = clients.get(0);
SolrClient client1 = clients.get(1);
SolrClient client2 = clients.get(2);
long v = 0;
add(client0, seenLeader, sdoc("id", "1", "_version_", ++v));
// this fails because client1 has no context (i.e. no updates of its own to judge whether applying the updates
// from client0 will bring it into sync with client0)
assertSync(client1, numVersions, false, shardsArr[0]);
// bring client1 back into sync with client0 by adding the doc
add(client1, seenLeader, sdoc("id", "1", "_version_", v));
// both have the same version list, so sync should now return true
assertSync(client1, numVersions, true, shardsArr[0]);
// TODO: test that updates weren't necessary
client0.commit();
client1.commit();
queryAndCompare(params("q", "*:*"), client0, client1);
add(client0, seenLeader, addRandFields(sdoc("id", "2", "_version_", ++v)));
// now client1 has the context to sync
assertSync(client1, numVersions, true, shardsArr[0]);
client0.commit();
client1.commit();
queryAndCompare(params("q", "*:*"), client0, client1);
add(client0, seenLeader, addRandFields(sdoc("id", "3", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "4", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "5", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "6", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "7", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "8", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "9", "_version_", ++v)));
add(client0, seenLeader, addRandFields(sdoc("id", "10", "_version_", ++v)));
for (int i = 0; i < 10; i++) docsAdded.add(i + 1);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
int toAdd = (int) (numVersions * .95);
for (int i = 0; i < toAdd; i++) {
  add(client0, seenLeader, sdoc("id", Integer.toString(i + 11), "_version_", v + i + 1));
  docsAdded.add(i + 11);
}
// sync should fail since there's not enough overlap to give us confidence
assertSync(client1, numVersions, false, shardsArr[0]);
// add some of the docs that were missing... just enough to give enough overlap
int toAdd2 = (int) (numVersions * .25);
for (int i = 0; i < toAdd2; i++) {
  add(client1, seenLeader, sdoc("id", Integer.toString(i + 11), "_version_", v + i + 1));
}
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// test delete and deleteByQuery
v = 1000;
SolrInputDocument doc = sdoc("id", "1000", "_version_", ++v);
add(client0, seenLeader, doc);
add(client0, seenLeader, sdoc("id", "1001", "_version_", ++v));
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:1001 OR id:1002");
add(client0, seenLeader, sdoc("id", "1002", "_version_", ++v));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "1000");
// 1002 added
docsAdded.add(1002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// test that delete by query is returned even if not requested, and that it doesn't delete newer stuff than it should
v = 2000;
SolrClient client = client0;
add(client, seenLeader, sdoc("id", "2000", "_version_", ++v));
add(client, seenLeader, sdoc("id", "2001", "_version_", ++v));
delQ(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:2001 OR id:2002");
add(client, seenLeader, sdoc("id", "2002", "_version_", ++v));
del(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "2000");
// 2002 added
docsAdded.add(2002);
v = 2000;
client = client1;
add(client, seenLeader, sdoc("id", "2000", "_version_", ++v));
// pretend we missed the add of 2001. peersync should retrieve it, but should also retrieve any deleteByQuery objects after it
++v;
// add(client, seenLeader, sdoc("id","2001","_version_",++v));
delQ(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "id:2001 OR id:2002");
add(client, seenLeader, sdoc("id", "2002", "_version_", ++v));
del(client, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", Long.toString(-++v)), "2000");
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
//
// Test that handling reorders work when applying docs retrieved from peer
//
// this should cause us to retrieve the delete (but not the following add)
// the reorder in application shouldn't affect anything
add(client0, seenLeader, sdoc("id", "3000", "_version_", 3001));
add(client1, seenLeader, sdoc("id", "3000", "_version_", 3001));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3000"), "3000");
docsAdded.add(3000);
// this should cause us to retrieve an add that was previously deleted
add(client0, seenLeader, sdoc("id", "3001", "_version_", 3003));
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3001"), "3004");
del(client1, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "3001"), "3004");
// this should cause us to retrieve an older add that was overwritten
add(client0, seenLeader, sdoc("id", "3002", "_version_", 3004));
add(client0, seenLeader, sdoc("id", "3002", "_version_", 3005));
add(client1, seenLeader, sdoc("id", "3002", "_version_", 3005));
// 3001 added
docsAdded.add(3001);
// 3002 added
docsAdded.add(3002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// now let's check that fingerprinting causes appropriate failures
v = 4000;
add(client0, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
docsAdded.add(4000);
toAdd = numVersions + 10;
for (int i = 0; i < toAdd; i++) {
  add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
  add(client1, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
  docsAdded.add((int) v + i + 1);
}
// client0 now has an additional add beyond our window and the fingerprint should cause this to fail
assertSync(client1, numVersions, false, shardsArr[0]);
// if we turn off fingerprinting, it should succeed
System.setProperty("solr.disableFingerprint", "true");
try {
  assertSync(client1, numVersions, true, shardsArr[0]);
} finally {
  System.clearProperty("solr.disableFingerprint");
}
// let's add the missing document and verify that order doesn't matter
add(client1, seenLeader, sdoc("id", Integer.toString((int) v), "_version_", v));
assertSync(client1, numVersions, true, shardsArr[0]);
// let's do some overwrites to ensure that repeated updates and maxDoc don't matter
for (int i = 0; i < 10; i++) {
  add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
}
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// let's add some in-place updates
// full update
add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000));
docsAdded.add(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(0, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
ModifiableSolrParams inPlaceParams = new ModifiableSolrParams(seenLeader);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5000");
// in-place update
add(client0, inPlaceParams, sdoc("id", "5000", "val_i_dvo", 1, "_version_", 5001));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(1, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
// interleave the in-place updates with a few deletes to other documents
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5002"), 4001);
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5003"), "id:4002");
docsAdded.remove(4001);
docsAdded.remove(4002);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5001");
// in-place update
add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 2, "_version_", 5004));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(2, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
// a DBQ with value
// current val is 2, so this should not delete anything
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5005"), "val_i_dvo:1");
assertSync(client1, numVersions, true, shardsArr[0]);
// full update
add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000));
docsAdded.add(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5004");
add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 3, "_version_", 5006));
assertSync(client1, numVersions, true, shardsArr[0]);
// verify the in-place updated document (id=5000) has correct fields
assertEquals(3, client1.getById("5000").get("val_i_dvo"));
assertEquals(client0.getById("5000") + " and " + client1.getById("5000"), "mytitle", client1.getById("5000").getFirstValue("title"));
validateDocs(docsAdded, client0, client1);
del(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "5007"), 5000);
docsAdded.remove(5000);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// if doc with id=6000 is deleted, further in-place-updates should fail
// full update
add(client0, seenLeader, sdoc("id", "6000", "val_i_dvo", 6, "title", "mytitle", "_version_", 6000));
// current val_i_dvo is 6, so this DBQ deletes id=6000
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "6004"), "val_i_dvo:6");
assertSync(client1, numVersions, true, shardsArr[0]);
SolrException ex = expectThrows(SolrException.class, () -> {
  inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "6000");
  add(client0, inPlaceParams, sdoc("id", 6000, "val_i_dvo", 6003, "_version_", 5007));
});
assertEquals(ex.toString(), SolrException.ErrorCode.SERVER_ERROR.code, ex.code());
assertThat(ex.getMessage(), containsString("Can't find document with id=6000"));
// Reordered DBQ with Child-nodes (SOLR-10114)
docsAdded.clear();
// Reordered full delete should not delete child-docs
// add with later version
add(client0, seenLeader, sdocWithChildren(7001, "7001", 2));
docsAdded.add(7001);
docsAdded.add(7001001);
docsAdded.add(7001002);
// reordered delete
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "7000"), "id:*");
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
// Reordered DBQ should not affect update
// add with later version
add(client0, seenLeader, sdocWithChildren(8000, "8000", 5));
// not found, arrives earlier
delQ(client0, params(DISTRIB_UPDATE_PARAM, FROM_LEADER, "_version_", "8002"), "id:8500");
// update with two children
add(client0, seenLeader, sdocWithChildren(8000, "8001", 2));
docsAdded.add(8000);
docsAdded.add(8000001);
docsAdded.add(8000002);
assertSync(client1, numVersions, true, shardsArr[0]);
validateDocs(docsAdded, client0, client1);
}
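Two ModifiableSolrParams idioms carry most of this test: the params(name, value, ...) helper from the Solr test framework, which builds a ModifiableSolrParams from alternating name/value pairs, and the copy constructor behind inPlaceParams, which snapshots seenLeader so that set() on the copy leaves the original untouched. A rough sketch of both follows; the helper name paramsOf and the literal version string are my own, and it assumes seenLeader carries the FROMLEADER distrib phase, which is how these tests typically set it up.
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.update.processor.DistributedUpdateProcessor;

// rough equivalent of the test framework's params(...) helper
static ModifiableSolrParams paramsOf(String... nameValuePairs) {
  ModifiableSolrParams msp = new ModifiableSolrParams();
  for (int i = 0; i < nameValuePairs.length; i += 2) {
    msp.add(nameValuePairs[i], nameValuePairs[i + 1]);
  }
  return msp;
}

// copy-then-override pattern from the in-place update section above
ModifiableSolrParams leaderParams = paramsOf(DistributedUpdateProcessor.DISTRIB_UPDATE_PARAM,
  DistributedUpdateProcessor.DistribPhase.FROMLEADER.toString());
ModifiableSolrParams inPlace = new ModifiableSolrParams(leaderParams);
inPlace.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5000"); // leaderParams is unchanged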
Use of org.apache.solr.common.params.ModifiableSolrParams in project lucene-solr by apache.
From the class SolrCmdDistributorTest, method testMaxRetries.
private void testMaxRetries() throws IOException {
final MockStreamingSolrClients streamingClients = new MockStreamingSolrClients(updateShardHandler);
SolrCmdDistributor cmdDistrib = new SolrCmdDistributor(streamingClients, 5, 0);
streamingClients.setExp(Exp.CONNECT_EXCEPTION);
ArrayList<Node> nodes = new ArrayList<>();
final HttpSolrClient solrclient1 = (HttpSolrClient) clients.get(0);
final AtomicInteger retries = new AtomicInteger();
ZkNodeProps nodeProps = new ZkNodeProps(ZkStateReader.BASE_URL_PROP, solrclient1.getBaseURL(), ZkStateReader.CORE_NAME_PROP, "");
RetryNode retryNode = new RetryNode(new ZkCoreNodeProps(nodeProps), null, "collection1", "shard1") {
  @Override
  public boolean checkRetry() {
    retries.incrementAndGet();
    return true;
  }
};
nodes.add(retryNode);
AddUpdateCommand cmd = new AddUpdateCommand(null);
cmd.solrDoc = sdoc("id", id.incrementAndGet());
ModifiableSolrParams params = new ModifiableSolrParams();
cmdDistrib.distribAdd(cmd, nodes, params);
cmdDistrib.finish();
assertEquals(6, retries.get());
assertEquals(1, cmdDistrib.getErrors().size());
}
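In this test the ModifiableSolrParams handed to distribAdd is deliberately empty. If a caller needed to forward request-level flags, they would be set on that same object before the call; a hedged sketch reusing cmd and nodes from above, where the commitWithin value is an assumption and not something the test does:
import org.apache.solr.common.params.UpdateParams;

ModifiableSolrParams forwarded = new ModifiableSolrParams();
forwarded.set(UpdateParams.COMMIT_WITHIN, 1000); // assumed value, illustration only
cmdDistrib.distribAdd(cmd, nodes, forwarded);
cmdDistrib.finish();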
Use of org.apache.solr.common.params.ModifiableSolrParams in project lucene-solr by apache.
From the class SolrCmdDistributorTest, method testOneRetry.
private void testOneRetry() throws Exception {
final HttpSolrClient solrclient = (HttpSolrClient) clients.get(0);
long numFoundBefore = solrclient.query(new SolrQuery("*:*")).getResults().getNumFound();
final MockStreamingSolrClients streamingClients = new MockStreamingSolrClients(updateShardHandler);
SolrCmdDistributor cmdDistrib = new SolrCmdDistributor(streamingClients, 5, 0);
streamingClients.setExp(Exp.CONNECT_EXCEPTION);
ArrayList<Node> nodes = new ArrayList<>();
ZkNodeProps nodeProps = new ZkNodeProps(ZkStateReader.BASE_URL_PROP, solrclient.getBaseURL(), ZkStateReader.CORE_NAME_PROP, "");
final AtomicInteger retries = new AtomicInteger();
nodeProps = new ZkNodeProps(ZkStateReader.BASE_URL_PROP, solrclient.getBaseURL(), ZkStateReader.CORE_NAME_PROP, "");
RetryNode retryNode = new RetryNode(new ZkCoreNodeProps(nodeProps), null, "collection1", "shard1") {
  @Override
  public boolean checkRetry() {
    streamingClients.setExp(null);
    retries.incrementAndGet();
    return true;
  }
};
nodes.add(retryNode);
AddUpdateCommand cmd = new AddUpdateCommand(null);
cmd.solrDoc = sdoc("id", id.incrementAndGet());
ModifiableSolrParams params = new ModifiableSolrParams();
CommitUpdateCommand ccmd = new CommitUpdateCommand(null, false);
cmdDistrib.distribAdd(cmd, nodes, params);
cmdDistrib.distribCommit(ccmd, nodes, params);
cmdDistrib.finish();
assertEquals(1, retries.get());
long numFoundAfter = solrclient.query(new SolrQuery("*:*")).getResults().getNumFound();
// we will get java.net.ConnectException which we retry on
assertEquals(numFoundBefore + 1, numFoundAfter);
assertEquals(0, cmdDistrib.getErrors().size());
}
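When the error count here is not zero, it can help to dump the params that were forwarded with the command. ModifiableSolrParams is directly inspectable; a small standalone sketch, where the flag shown is an assumption rather than something this test sets:
ModifiableSolrParams p = new ModifiableSolrParams();
p.set("commit", "true"); // assumed flag, illustration only
for (String name : p.getParameterNames()) {
  System.out.println(name + " = " + p.get(name));
}
System.out.println(p); // URL-encoded form, e.g. commit=true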
Use of org.apache.solr.common.params.ModifiableSolrParams in project lucene-solr by apache.
From the class HttpClientUtil, method createClient.
public static CloseableHttpClient createClient(final SolrParams params, PoolingHttpClientConnectionManager cm, boolean sharedConnectionManager, HttpRequestExecutor httpRequestExecutor) {
final ModifiableSolrParams config = new ModifiableSolrParams(params);
if (logger.isDebugEnabled()) {
logger.debug("Creating new http client, config:" + config);
}
cm.setMaxTotal(params.getInt(HttpClientUtil.PROP_MAX_CONNECTIONS, 10000));
cm.setDefaultMaxPerRoute(params.getInt(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 10000));
cm.setValidateAfterInactivity(Integer.getInteger(VALIDATE_AFTER_INACTIVITY, VALIDATE_AFTER_INACTIVITY_DEFAULT));
HttpClientBuilder newHttpClientBuilder = HttpClientBuilder.create();
if (sharedConnectionManager) {
  newHttpClientBuilder.setConnectionManagerShared(true);
} else {
  newHttpClientBuilder.setConnectionManagerShared(false);
}
ConnectionKeepAliveStrategy keepAliveStrat = new ConnectionKeepAliveStrategy() {
  @Override
  public long getKeepAliveDuration(HttpResponse response, HttpContext context) {
    // we only close connections based on idle time, not ttl expiration
    return -1;
  }
};
if (httpClientBuilder.getAuthSchemeRegistryProvider() != null) {
  newHttpClientBuilder.setDefaultAuthSchemeRegistry(httpClientBuilder.getAuthSchemeRegistryProvider().getAuthSchemeRegistry());
}
if (httpClientBuilder.getCookieSpecRegistryProvider() != null) {
  newHttpClientBuilder.setDefaultCookieSpecRegistry(httpClientBuilder.getCookieSpecRegistryProvider().getCookieSpecRegistry());
}
if (httpClientBuilder.getCredentialsProviderProvider() != null) {
  newHttpClientBuilder.setDefaultCredentialsProvider(httpClientBuilder.getCredentialsProviderProvider().getCredentialsProvider());
}
newHttpClientBuilder.addInterceptorLast(new DynamicInterceptor());
newHttpClientBuilder = newHttpClientBuilder.setKeepAliveStrategy(keepAliveStrat).evictIdleConnections((long) Integer.getInteger(EVICT_IDLE_CONNECTIONS, EVICT_IDLE_CONNECTIONS_DEFAULT), TimeUnit.MILLISECONDS);
if (httpRequestExecutor != null) {
  newHttpClientBuilder.setRequestExecutor(httpRequestExecutor);
}
HttpClientBuilder builder = setupBuilder(newHttpClientBuilder, params);
HttpClient httpClient = builder.setConnectionManager(cm).build();
assert ObjectReleaseTracker.track(httpClient);
return (CloseableHttpClient) httpClient;
}
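A hedged caller-side sketch of how pool settings reach this method through SolrParams, assuming the single-argument createClient overload exposed by this class; the numeric limits are illustrative only:
ModifiableSolrParams clientParams = new ModifiableSolrParams();
clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 128); // assumed pool size
clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 32); // assumed per-host limit
CloseableHttpClient httpClient = HttpClientUtil.createClient(clientParams);
try {
  // ... issue requests with httpClient ...
} finally {
  HttpClientUtil.close(httpClient);
}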