Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
Class HttpSolrCall, method init():
protected void init() throws Exception {
  // check for management path
  String alternate = cores.getManagementPath();
  if (alternate != null && path.startsWith(alternate)) {
    path = path.substring(0, alternate.length());
  }
  // unused feature ?
  int idx = path.indexOf(':');
  if (idx > 0) {
    // save the portion after the ':' for a 'handler' path parameter
    path = path.substring(0, idx);
  }

  boolean usingAliases = false;

  // Check for container handlers
  handler = cores.getRequestHandler(path);
  if (handler != null) {
    solrReq = SolrRequestParsers.DEFAULT.parse(null, path, req);
    solrReq.getContext().put(CoreContainer.class.getName(), cores);
    requestType = RequestType.ADMIN;
    action = ADMIN;
    return;
  } else {
    // otherwise, we should find a core from the path
    idx = path.indexOf("/", 1);
    if (idx > 1) {
      // try to get the corename as a request parameter first
      corename = path.substring(1, idx);

      // look at aliases
      if (cores.isZooKeeperAware()) {
        origCorename = corename;
        ZkStateReader reader = cores.getZkController().getZkStateReader();
        aliases = reader.getAliases();
        if (aliases != null && aliases.collectionAliasSize() > 0) {
          usingAliases = true;
          String alias = aliases.getCollectionAlias(corename);
          if (alias != null) {
            collectionsList = StrUtils.splitSmart(alias, ",", true);
            corename = collectionsList.get(0);
          }
        }
      }

      core = cores.getCore(corename);
      if (core != null) {
        path = path.substring(idx);
      } else if (cores.isCoreLoading(corename)) {
        // extra mem barriers, so don't look at this before trying to get core
        throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "SolrCore is loading");
      } else {
        // the core may have just finished loading
        core = cores.getCore(corename);
        if (core != null) {
          path = path.substring(idx);
        }
      }
    }
    if (core == null) {
      if (!cores.isZooKeeperAware()) {
        core = cores.getCore("");
      }
    }
  }

  if (core == null && cores.isZooKeeperAware()) {
    // we couldn't find the core - lets make sure a collection was not specified instead
    boolean isPreferLeader = false;
    if (path.endsWith("/update") || path.contains("/update/")) {
      isPreferLeader = true;
    }
    core = getCoreByCollection(corename, isPreferLeader);
    if (core != null) {
      // we found a core, update the path
      path = path.substring(idx);
      if (collectionsList == null)
        collectionsList = new ArrayList<>();
      collectionsList.add(corename);
    }
    // if we couldn't find it locally, look on other nodes
    extractRemotePath(corename, origCorename, idx);
    if (action != null)
      return;
    // core is not available locally or remotely
    autoCreateSystemColl(corename);
    if (action != null)
      return;
  }

  // With a valid core...
  if (core != null) {
    MDCLoggingContext.setCore(core);
    config = core.getSolrConfig();
    // get or create/cache the parser for the core
    SolrRequestParsers parser = config.getRequestParsers();

    // Determine the handler from the url path if not set
    // (we might already have selected the cores handler)
    extractHandlerFromURLPath(parser);
    if (action != null)
      return;

    // With a valid handler and a valid core...
    if (handler != null) {
      // if not a /select, create the request
      if (solrReq == null) {
        solrReq = parser.parse(core, path, req);
      }
      if (usingAliases) {
        processAliases(aliases, collectionsList);
      }
      action = PROCESS;
      // we are done with a valid handler
      return;
    }
  }

  log.debug("no handler or core retrieved for " + path + ", follow through...");
  action = PASSTHROUGH;
}
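A subtle detail above is the double cores.getCore(corename) lookup: isCoreLoading is checked between two lookups because a core can finish loading at any moment, and the check supplies the memory barriers the comment alludes to. A minimal sketch of that pattern in isolation, assuming a hypothetical getCoreOrThrow helper; the CoreContainer and SolrException calls are the ones used above:

private SolrCore getCoreOrThrow(CoreContainer cores, String corename) {
  SolrCore core = cores.getCore(corename);
  if (core != null) {
    return core;
  }
  if (cores.isCoreLoading(corename)) {
    // still loading: surface 503 SERVICE_UNAVAILABLE rather than a misleading 404
    throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "SolrCore is loading");
  }
  // the core may have just finished loading between the two calls
  return cores.getCore(corename); // may still be null
}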
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
Class HttpSolrCall, method lookupAliases():
protected String lookupAliases(String collName) {
  ZkStateReader reader = cores.getZkController().getZkStateReader();
  aliases = reader.getAliases();
  if (aliases != null && aliases.collectionAliasSize() > 0) {
    usingAliases = true;
    String alias = aliases.getCollectionAlias(collName);
    if (alias != null) {
      collectionsList = StrUtils.splitSmart(alias, ",", true);
      return collectionsList.get(0);
    }
  }
  return null;
}
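A hedged usage sketch for the method above: how calling code might fold the alias target back into the core lookup. The caller below is an illustration, not code from the project; lookupAliases returns null when the name is not an alias, so the raw name is kept in that case.

String resolved = lookupAliases(corename);
if (resolved != null) {
  corename = resolved; // first collection in the alias target list
}
core = cores.getCore(corename);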
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
Class BasicDistributedZkTest, method test():
@Test
@ShardsFixed(num = 4)
public void test() throws Exception {
  // setLoggingLevel(null);
  ZkStateReader zkStateReader = cloudClient.getZkStateReader();
  // make sure we have leaders for each shard
  for (int j = 1; j < sliceCount; j++) {
    zkStateReader.getLeaderRetry(DEFAULT_COLLECTION, "shard" + j, 10000);
  }
  // make sure we again have leaders for each shard
  waitForRecoveriesToFinish(false);

  handle.clear();
  handle.put("timestamp", SKIPVAL);

  del("*:*");
  queryAndCompareShards(params("q", "*:*", "distrib", "false", "sanity_check", "is_empty"));

  // ask every individual replica of every shard to update+commit the same doc id
  // with an incrementing counter on each update+commit
  int foo_i_counter = 0;
  for (SolrClient client : clients) {
    foo_i_counter++;
    indexDoc(client, params("commit", "true"), // SOLR-4923
        sdoc(id, 1, i1, 100, tlong, 100, "foo_i", foo_i_counter));
    // after every update+commit, check all the shards consistency
    queryAndCompareShards(params("q", "id:1", "distrib", "false", "sanity_check", "non_distrib_id_1_lookup"));
    queryAndCompareShards(params("q", "id:1", "sanity_check", "distrib_id_1_lookup"));
  }
  indexr(id, 1, i1, 100, tlong, 100, t1, "now is the time for all good men", "foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d);
  indexr(id, 2, i1, 50, tlong, 50, t1, "to come to the aid of their country.");
  indexr(id, 3, i1, 2, tlong, 2, t1, "how now brown cow");
  indexr(id, 4, i1, -100, tlong, 101, t1, "the quick fox jumped over the lazy dog");
  indexr(id, 5, i1, 500, tlong, 500, t1, "the quick fox jumped way over the lazy dog");
  indexr(id, 6, i1, -600, tlong, 600, t1, "humpty dumpy sat on a wall");
  indexr(id, 7, i1, 123, tlong, 123, t1, "humpty dumpy had a great fall");
  indexr(id, 8, i1, 876, tlong, 876, t1, "all the kings horses and all the kings men");
  indexr(id, 9, i1, 7, tlong, 7, t1, "couldn't put humpty together again");
  indexr(id, 10, i1, 4321, tlong, 4321, t1, "this too shall pass");
  indexr(id, 11, i1, -987, tlong, 987, t1, "An eye for eye only ends up making the whole world blind.");
  indexr(id, 12, i1, 379, tlong, 379, t1, "Great works are performed, not by strength, but by perseverance.");
  indexr(id, 13, i1, 232, tlong, 232, t1, "no eggs on wall, lesson learned", oddField, "odd man out");
  indexr(id, 14, "SubjectTerms_mfacet", new String[] { "mathematical models", "mathematical analysis" });
  indexr(id, 15, "SubjectTerms_mfacet", new String[] { "test 1", "test 2", "test3" });
  indexr(id, 16, "SubjectTerms_mfacet", new String[] { "test 1", "test 2", "test3" });

  String[] vals = new String[100];
  for (int i = 0; i < 100; i++) {
    vals[i] = "test " + i;
  }
  indexr(id, 17, "SubjectTerms_mfacet", vals);

  for (int i = 100; i < 150; i++) {
    indexr(id, i);
  }

  commit();

  queryAndCompareShards(params("q", "*:*", "sort", "id desc", "distrib", "false", "sanity_check", "is_empty"));

  // random value sort
  for (String f : fieldNames) {
    query(false, new String[] { "q", "*:*", "sort", f + " desc" });
    query(false, new String[] { "q", "*:*", "sort", f + " asc" });
  }

  // these queries should be exactly ordered and scores should exactly match
  query(false, new String[] { "q", "*:*", "sort", i1 + " desc" });
  query(false, new String[] { "q", "*:*", "sort", i1 + " asc" });
  query(false, new String[] { "q", "*:*", "sort", i1 + " desc", "fl", "*,score" });
  query(false, new String[] { "q", "*:*", "sort", "n_tl1 asc", "fl", "*,score" });
  query(false, new String[] { "q", "*:*", "sort", "n_tl1 desc" });
handle.put("maxScore", SKIPVAL);
// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList()
query(false, new String[] { "q", "{!func}" + i1 });
//is agnostic of request params.
handle.remove("maxScore");
// even scores should match exactly here
query(false, new String[] { "q", "{!func}" + i1, "fl", "*,score" });
handle.put("highlighting", UNORDERED);
handle.put("response", UNORDERED);
handle.put("maxScore", SKIPVAL);
query(false, new String[] { "q", "quick" });
query(false, new String[] { "q", "all", "fl", "id", "start", "0" });
// no fields in returned docs
query(false, new String[] { "q", "all", "fl", "foofoofoo", "start", "0" });
query(false, new String[] { "q", "all", "fl", "id", "start", "100" });
handle.put("score", SKIPVAL);
query(false, new String[] { "q", "quick", "fl", "*,score" });
query(false, new String[] { "q", "all", "fl", "*,score", "start", "1" });
query(false, new String[] { "q", "all", "fl", "*,score", "start", "100" });
query(false, new String[] { "q", "now their fox sat had put", "fl", "*,score", "hl", "true", "hl.fl", t1 });
query(false, new String[] { "q", "now their fox sat had put", "fl", "foofoofoo", "hl", "true", "hl.fl", t1 });
query(false, new String[] { "q", "matchesnothing", "fl", "*,score" });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1 });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1, "facet.limit", -1, "facet.sort", "count" });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1, "facet.limit", -1, "facet.sort", "count", "facet.mincount", 2 });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1, "facet.limit", -1, "facet.sort", "index" });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1, "facet.limit", -1, "facet.sort", "index", "facet.mincount", 2 });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1, "facet.limit", 1 });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.query", "quick", "facet.query", "all", "facet.query", "*:*" });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1, "facet.offset", 1 });
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", t1, "facet.mincount", 2 });
// test faceting multiple things at once
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.query", "quick", "facet.query", "all", "facet.query", "*:*", "facet.field", t1 });
// test filter tagging, facet exclusion, and naming (multi-select facet support)
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.query", "{!key=myquick}quick", "facet.query", "{!key=myall ex=a}all", "facet.query", "*:*", "facet.field", "{!key=mykey ex=a}" + t1, "facet.field", "{!key=other ex=b}" + t1, "facet.field", "{!key=again ex=a,b}" + t1, "facet.field", t1, "fq", "{!tag=a}id:[1 TO 7]", "fq", "{!tag=b}id:[3 TO 9]" });
query(false, new Object[] { "q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1" });
// test field that is valid in schema but missing in all shards
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", missingField, "facet.mincount", 2 });
// test field that is valid in schema and missing in some shards
query(false, new Object[] { "q", "*:*", "rows", 100, "facet", "true", "facet.field", oddField, "facet.mincount", 2 });
query(false, new Object[] { "q", "*:*", "sort", i1 + " desc", "stats", "true", "stats.field", i1 });
  /*** TODO: the failure may come back in "exception"
  try {
    // test error produced for field that is invalid for schema
    query("q", "*:*", "rows", 100, "facet", "true", "facet.field", invalidField, "facet.mincount", 2);
    TestCase.fail("SolrServerException expected for invalid field that is not in schema");
  } catch (SolrServerException ex) {
    // expected
  }
  ***/

  // Try to get better coverage for refinement queries by turning off over requesting.
  // This makes it much more likely that we may not get the top facet values and hence
  // we turn off that checking.
  handle.put("facet_fields", SKIPVAL);
  query(false, new Object[] { "q", "*:*", "rows", 0, "facet", "true", "facet.field", t1, "facet.limit", 5, "facet.shard.limit", 5 });
  // check a complex key name
  query(false, new Object[] { "q", "*:*", "rows", 0, "facet", "true", "facet.field", "{!key='a b/c \\' \\} foo'}" + t1, "facet.limit", 5, "facet.shard.limit", 5 });
  handle.remove("facet_fields");
  // index the same document to multiple servers and make sure things don't blow up.
  if (clients.size() >= 2) {
    index(id, 100, i1, 107, t1, "oh no, a duplicate!");
    for (int i = 0; i < clients.size(); i++) {
      index_specific(i, id, 100, i1, 107, t1, "oh no, a duplicate!");
    }
    commit();
    query(false, new Object[] { "q", "duplicate", "hl", "true", "hl.fl", t1 });
    query(false, new Object[] { "q", "fox duplicate horses", "hl", "true", "hl.fl", t1 });
    query(false, new Object[] { "q", "*:*", "rows", 100 });
  }
  // test debugging
  handle.put("explain", SKIPVAL);
  handle.put("debug", UNORDERED);
  handle.put("time", SKIPVAL);
  handle.put("track", SKIP);
  query(false, new Object[] { "q", "now their fox sat had put", "fl", "*,score", CommonParams.DEBUG_QUERY, "true" });
  query(false, new Object[] { "q", "id:[1 TO 5]", CommonParams.DEBUG_QUERY, "true" });
  query(false, new Object[] { "q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.TIMING });
  query(false, new Object[] { "q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.RESULTS });
  query(false, new Object[] { "q", "id:[1 TO 5]", CommonParams.DEBUG, CommonParams.QUERY });

  // try add commitWithin
  long before = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
  for (SolrClient client : clients) {
    assertEquals("unexpected pre-commitWithin document count on node: " + ((HttpSolrClient) client).getBaseURL(),
        before, client.query(new SolrQuery("*:*")).getResults().getNumFound());
  }

  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("commitWithin", 10);
  add(cloudClient, params, getDoc("id", 300), getDoc("id", 301));
  waitForDocCount(before + 2, 30000, "add commitWithin did not work");

  // try deleteById commitWithin
  UpdateRequest deleteByIdReq = new UpdateRequest();
  deleteByIdReq.deleteById("300");
  deleteByIdReq.setCommitWithin(10);
  deleteByIdReq.process(cloudClient);
  waitForDocCount(before + 1, 30000, "deleteById commitWithin did not work");

  // try deleteByQuery commitWithin
  UpdateRequest deleteByQueryReq = new UpdateRequest();
  deleteByQueryReq.deleteByQuery("id:301");
  deleteByQueryReq.setCommitWithin(10);
  deleteByQueryReq.process(cloudClient);
  waitForDocCount(before, 30000, "deleteByQuery commitWithin did not work");
  // TODO: This test currently fails because debug info is obtained only
  // on shards with matches.
  // query("q","matchesnothing","fl","*,score", "debugQuery", "true");

  // would be better if these were all separate tests - but much, much slower
  doOptimisticLockingAndUpdating();
  testShardParamVariations();
  testMultipleCollections();
  testANewCollectionInOneInstance();
  testSearchByCollectionName();
  testUpdateByCollectionName();
  testANewCollectionInOneInstanceWithManualShardAssignement();
  testNumberOfCommitsWithCommitAfterAdd();
  testUpdateProcessorsRunOnlyOnce("distrib-dup-test-chain-explicit");
  testUpdateProcessorsRunOnlyOnce("distrib-dup-test-chain-implicit");
  testStopAndStartCoresInOneInstance();
}
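waitForDocCount above is a helper of the test base class; its body is not shown here. A hedged sketch of an equivalent poll loop, written against the SolrJ calls already used in this test (the implementation below is an assumption, not the project's helper):

private void waitForDocCount(long expected, long timeoutMs, String failMsg) throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    long found = cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound();
    if (found == expected) {
      return; // the commitWithin auto-commit became visible
    }
    Thread.sleep(250); // commitWithin commits asynchronously; poll until visible
  }
  fail(failMsg);
}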
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
Class BaseCdcrDistributedZkTest, method waitForCollectionToDisappear():
private void waitForCollectionToDisappear(String collection) throws Exception {
  CloudSolrClient client = this.createCloudClient(null);
  try {
    client.connect();
    ZkStateReader zkStateReader = client.getZkStateReader();
    AbstractDistribZkTestBase.waitForCollectionToDisappear(collection, zkStateReader, false, true, 15);
  } finally {
    client.close();
  }
}
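AbstractDistribZkTestBase.waitForCollectionToDisappear does the actual waiting; its body is not shown here. A hedged sketch of what such a wait loop can look like against ZkStateReader (an illustration of the pattern, not the helper's real implementation):

private void waitUntilGone(ZkStateReader zkStateReader, String collection, int timeoutSeconds) throws Exception {
  long deadline = System.currentTimeMillis() + timeoutSeconds * 1000L;
  while (System.currentTimeMillis() < deadline) {
    zkStateReader.forceUpdateCollection(collection); // refresh from ZooKeeper
    if (!zkStateReader.getClusterState().hasCollection(collection)) {
      return; // the collection is gone from the cluster state
    }
    Thread.sleep(500);
  }
  fail("collection " + collection + " still present after " + timeoutSeconds + "s");
}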
Use of org.apache.solr.common.cloud.ZkStateReader in project lucene-solr by apache.
Class ChaosMonkeyShardSplitTest, method waitTillRecovered():
private void waitTillRecovered() throws Exception {
  for (int i = 0; i < 30; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    zkStateReader.forceUpdateCollection("collection1");
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;
    for (Replica replica : replicas) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see recovered node");
}
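The inner loop is the interesting part: a shard counts as recovered only when every replica is ACTIVE and hosted on a live node. Factored out as a predicate, it reads as below; the helper name is hypothetical, but every call matches the snippet above.

private boolean isSliceRecovered(ClusterState clusterState, Slice slice) {
  for (Replica replica : slice.getReplicas()) {
    if (!clusterState.liveNodesContain(replica.getNodeName())
        || replica.getState() != Replica.State.ACTIVE) {
      return false; // a dead node or a non-active replica means not recovered yet
    }
  }
  return true;
}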