
Example 46 with SolrClient

use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.

the class TestJsonFacets method doStatsTemplated.

public static void doStatsTemplated(Client client, ModifiableSolrParams p) throws Exception {
    p.set("Z_num_i", "Z_" + p.get("num_i"));
    p.set("sparse_num_d", "sparse_" + p.get("num_d"));
    if (p.get("num_is") == null)
        p.add("num_is", "num_is");
    if (p.get("num_fs") == null)
        p.add("num_fs", "num_fs");
    String terms = p.get("terms");
    if (terms == null)
        terms = "";
    int limit = 0;
    switch(random().nextInt(4)) {
        case 0:
            limit = -1;
            break;
        case 1:
            limit = 1000000;
            break;
        case 2:
            // fallthrough
        case 3:
            // fallthrough
    }
    if (limit != 0) {
        terms = terms + "limit:" + limit + ",";
    }
    String terms_method = p.get("terms_method");
    if (terms_method != null) {
        terms = terms + terms_method;
    }
    p.set("terms", terms);
    // "${terms}" should be put at the beginning of generic terms facets.
    // It may specify "method=..." or "limit:-1", so should not be used if the facet explicitly specifies.
    MacroExpander m = new MacroExpander(p.getMap());
    String cat_s = m.expand("${cat_s}");
    String where_s = m.expand("${where_s}");
    String num_d = m.expand("${num_d}");
    String num_i = m.expand("${num_i}");
    String num_is = m.expand("${num_is}");
    String num_fs = m.expand("${num_fs}");
    String Z_num_i = m.expand("${Z_num_i}");
    String val_b = m.expand("${val_b}");
    String date = m.expand("${date}");
    String super_s = m.expand("${super_s}");
    String sparse_s = m.expand("${sparse_s}");
    String multi_ss = m.expand("${multi_ss}");
    String sparse_num_d = m.expand("${sparse_num_d}");
    client.deleteByQuery("*:*", null);
    Client iclient = client;
    /*** This code was not needed yet, but may be needed if we want to force empty shard results more often.
    // create a new indexing client that doesn't use one shard to better test for empty or non-existent results
    if (!client.local()) {
      List<SolrClient> shards = client.getClientProvider().all();
      iclient = new Client(shards.subList(0, shards.size()-1), client.getClientProvider().getSeed());
     }
     ***/
    SolrInputDocument doc = sdoc("id", "1", cat_s, "A", where_s, "NY", num_d, "4", sparse_num_d, "6", num_i, "2", num_is, "2", num_is, "-5", num_fs, "2", num_fs, "-5", super_s, "zodiac", date, "2001-01-01T01:01:01Z", val_b, "true", sparse_s, "one");
    iclient.add(doc, null);
    iclient.add(doc, null);
    // a couple of deleted docs
    iclient.add(doc, null);
    iclient.add(sdoc("id", "2", cat_s, "B", where_s, "NJ", num_d, "-9", num_i, "-5", num_is, "3", num_is, "-1", num_fs, "3", num_fs, "-1.5", super_s, "superman", date, "2002-02-02T02:02:02Z", val_b, "false", multi_ss, "a", multi_ss, "b", Z_num_i, "0"), null);
    iclient.add(sdoc("id", "3"), null);
    iclient.commit();
    iclient.add(sdoc("id", "4", cat_s, "A", where_s, "NJ", num_d, "2", sparse_num_d, "-4", num_i, "3", num_is, "0", num_is, "3", num_fs, "0", num_fs, "3", super_s, "spiderman", date, "2003-03-03T03:03:03Z", multi_ss, "b", Z_num_i, "" + Integer.MIN_VALUE), null);
    iclient.add(sdoc("id", "5", cat_s, "B", where_s, "NJ", num_d, "11", num_i, "7", num_is, "0", num_fs, "0", super_s, "batman", date, "2001-02-03T01:02:03Z", sparse_s, "two", multi_ss, "a"), null);
    iclient.commit();
    iclient.add(sdoc("id", "6", cat_s, "B", where_s, "NY", num_d, "-5", num_i, "-5", num_is, "-1", num_fs, "-1.5", super_s, "hulk", date, "2002-03-01T03:02:01Z", multi_ss, "b", multi_ss, "a", Z_num_i, "" + Integer.MAX_VALUE), null);
    iclient.commit();
    client.commit();
    // test for presence of debugging info
    ModifiableSolrParams debugP = params(p);
    debugP.set("debugQuery", "true");
    client.testJQ(params(debugP, "q", "*:*", "json.facet", "{catA:{query:{q:'${cat_s}:A'}},  catA2:{query:{query:'${cat_s}:A'}},  catA3:{query:'${cat_s}:A'}    }"), "facets=={ 'count':6, 'catA':{ 'count':2}, 'catA2':{ 'count':2}, 'catA3':{ 'count':2}}", // just test for presence, not exact structure / values
    "debug/facet-trace==");
    // straight query facets
    client.testJQ(params(p, "q", "*:*", "json.facet", "{catA:{query:{q:'${cat_s}:A'}},  catA2:{query:{query:'${cat_s}:A'}},  catA3:{query:'${cat_s}:A'}    }"), "facets=={ 'count':6, 'catA':{ 'count':2}, 'catA2':{ 'count':2}, 'catA3':{ 'count':2}}");
    // nested query facets
    client.testJQ(params(p, "q", "*:*", "json.facet", "{ catB:{type:query, q:'${cat_s}:B', facet:{nj:{query:'${where_s}:NJ'}, ny:{query:'${where_s}:NY'}} }}"), "facets=={ 'count':6, 'catB':{'count':3, 'nj':{'count':2}, 'ny':{'count':1}}}");
    // nested query facets on subset
    client.testJQ(params(p, "q", "id:(2 3)", "json.facet", "{ catB:{query:{q:'${cat_s}:B', facet:{nj:{query:'${where_s}:NJ'}, ny:{query:'${where_s}:NY'}} }}}"), "facets=={ 'count':2, 'catB':{'count':1, 'nj':{'count':1}, 'ny':{'count':0}}}");
    // nested query facets with stats
    client.testJQ(params(p, "q", "*:*", "json.facet", "{ catB:{query:{q:'${cat_s}:B', facet:{nj:{query:{q:'${where_s}:NJ'}}, ny:{query:'${where_s}:NY'}} }}}"), "facets=={ 'count':6, 'catB':{'count':3, 'nj':{'count':2}, 'ny':{'count':1}}}");
    // field/terms facet
    client.testJQ(params(p, "q", "*:*", "json.facet", "{c1:{field:'${cat_s}'}, c2:{field:{field:'${cat_s}'}}, c3:{${terms} type:terms, field:'${cat_s}'}  }"), "facets=={ 'count':6, " + "'c1':{ 'buckets':[{ 'val':'B', 'count':3}, { 'val':'A', 'count':2}]}, " + "'c2':{ 'buckets':[{ 'val':'B', 'count':3}, { 'val':'A', 'count':2}]}, " + "'c3':{ 'buckets':[{ 'val':'B', 'count':3}, { 'val':'A', 'count':2}]}} ");
    // test mincount
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:'${cat_s}', mincount:3}}}"), "facets=={ 'count':6, " + "'f1':{  'buckets':[{ 'val':'B', 'count':3}]} } ");
    // test default mincount of 1
    client.testJQ(params(p, "q", "id:1", "json.facet", "{f1:{terms:'${cat_s}'}}"), "facets=={ 'count':1, " + "'f1':{  'buckets':[{ 'val':'A', 'count':1}]} } ");
    // test  mincount of 0 - need processEmpty for distrib to match up
    client.testJQ(params(p, "q", "id:1", "json.facet", "{processEmpty:true, f1:{terms:{${terms} field:'${cat_s}', mincount:0}}}"), "facets=={ 'count':1, " + "'f1':{  'buckets':[{ 'val':'A', 'count':1}, { 'val':'B', 'count':0}]} } ");
    // test  mincount of 0 with stats, need processEmpty for distrib to match up
    client.testJQ(params(p, "q", "id:1", "json.facet", "{processEmpty:true, f1:{terms:{${terms} field:'${cat_s}', mincount:0, allBuckets:true, facet:{n1:'sum(${num_d})'}  }}}"), "facets=={ 'count':1, " + "'f1':{ allBuckets:{ 'count':1, n1:4.0}, 'buckets':[{ 'val':'A', 'count':1, n1:4.0}, { 'val':'B', 'count':0 /*, n1:0.0 */ }]} } ");
    // test sorting by other stats
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:'${cat_s}', sort:'n1 desc', facet:{n1:'sum(${num_d})'}  }}" + " , f2:{terms:{${terms} field:'${cat_s}', sort:'n1 asc', facet:{n1:'sum(${num_d})'}  }} }"), "facets=={ 'count':6, " + "  f1:{  'buckets':[{ val:'A', count:2, n1:6.0 }, { val:'B', count:3, n1:-3.0}]}" + ", f2:{  'buckets':[{ val:'B', count:3, n1:-3.0}, { val:'A', count:2, n1:6.0 }]} }");
    // test sorting by other stats
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'min(${num_d})'}  }" + " , f2:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'max(${num_d})'}  } " + " , f3:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'unique(${where_s})'}  } " + " , f4:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'hll(${where_s})'}  } " + " , f5:{${terms} type:terms, field:'${cat_s}', sort:'x desc', facet:{x:'variance(${num_d})'}  } " + // facet on a field that will cause hashing and exercise hll.resize on numeric field
    " , f6:{type:terms, field:${num_d}, limit:1, sort:'x desc', facet:{x:'hll(${num_i})'}  } " + "}"), "facets=={ 'count':6, " + "  f1:{  'buckets':[{ val:'A', count:2, x:2.0 },  { val:'B', count:3, x:-9.0}]}" + ", f2:{  'buckets':[{ val:'B', count:3, x:11.0 }, { val:'A', count:2, x:4.0 }]} " + ", f3:{  'buckets':[{ val:'A', count:2, x:2 },    { val:'B', count:3, x:2 }]} " + ", f4:{  'buckets':[{ val:'A', count:2, x:2 },    { val:'B', count:3, x:2 }]} " + ", f5:{  'buckets':[{ val:'B', count:3, x:74.6666666666666 },    { val:'A', count:2, x:1.0 }]} " + ", f6:{  buckets:[{ val:-9.0, count:1, x:1 }]} " + "}");
    // test sorting by stat with function
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:'${cat_s}', sort:'n1 desc', facet:{n1:'avg(add(${num_d},${num_d}))'}  }}" + " , f2:{terms:{${terms} field:'${cat_s}', sort:'n1 asc', facet:{n1:'avg(add(${num_d},${num_d}))'}  }} }"), "facets=={ 'count':6, " + "  f1:{  'buckets':[{ val:'A', count:2, n1:6.0 }, { val:'B', count:3, n1:-2.0}]}" + ", f2:{  'buckets':[{ val:'B', count:3, n1:-2.0}, { val:'A', count:2, n1:6.0 }]} }");
    // percentiles 0,10,50,90,100
    // catA: 2.0 2.2 3.0 3.8 4.0
    // catB: -9.0 -8.2 -5.0 7.800000000000001 11.0
    // all: -9.0 -7.3999999999999995 2.0 8.200000000000001 11.0
    // test sorting by single percentile
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:'${cat_s}', sort:'n1 desc', facet:{n1:'percentile(${num_d},50)'}  }}" + " , f2:{terms:{${terms} field:'${cat_s}', sort:'n1 asc', facet:{n1:'percentile(${num_d},50)'}  }} " + " , f3:{terms:{${terms} field:'${cat_s}', sort:'n1 desc', facet:{n1:'percentile(${sparse_num_d},50)'}  }} " + "}"), "facets=={ 'count':6, " + "  f1:{  'buckets':[{ val:'A', count:2, n1:3.0 }, { val:'B', count:3, n1:-5.0}]}" + ", f2:{  'buckets':[{ val:'B', count:3, n1:-5.0}, { val:'A', count:2, n1:3.0 }]}" + ", f3:{  'buckets':[{ val:'A', count:2, n1:1.0}, { val:'B', count:3}]}" + "}");
    // test sorting by multiple percentiles (sort is by first)
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${cat_s}, sort:'n1 desc', facet:{n1:'percentile(${num_d},50,0,100)'}  }}" + " , f2:{terms:{${terms} field:${cat_s}, sort:'n1 asc', facet:{n1:'percentile(${num_d},50,0,100)'}  }} }"), "facets=={ 'count':6, " + "  f1:{  'buckets':[{ val:'A', count:2, n1:[3.0,2.0,4.0] }, { val:'B', count:3, n1:[-5.0,-9.0,11.0] }]}" + ", f2:{  'buckets':[{ val:'B', count:3, n1:[-5.0,-9.0,11.0]}, { val:'A', count:2, n1:[3.0,2.0,4.0] }]} }");
    // test sorting by count/index order
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:'${cat_s}', sort:'count desc' }  }" + "           , f2:{terms:{${terms} field:'${cat_s}', sort:'count asc'  }  }" + "           , f3:{terms:{${terms} field:'${cat_s}', sort:'index asc'  }  }" + "           , f4:{terms:{${terms} field:'${cat_s}', sort:'index desc' }  }" + "}"), "facets=={ count:6 " + " ,f1:{buckets:[ {val:B,count:3}, {val:A,count:2} ] }" + " ,f2:{buckets:[ {val:A,count:2}, {val:B,count:3} ] }" + " ,f3:{buckets:[ {val:A,count:2}, {val:B,count:3} ] }" + " ,f4:{buckets:[ {val:B,count:3}, {val:A,count:2} ] }" + "}");
    // test sorting by default count/index order
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:'${cat_s}', sort:'count' }  }" + "           , f2:{terms:{${terms} field:'${cat_s}', sort:'count asc'  }  }" + "           , f3:{terms:{${terms} field:'${cat_s}', sort:'index'  }  }" + "           , f4:{terms:{${terms} field:'${cat_s}', sort:'index desc' }  }" + "}"), "facets=={ count:6 " + " ,f1:{buckets:[ {val:B,count:3}, {val:A,count:2} ] }" + " ,f2:{buckets:[ {val:A,count:2}, {val:B,count:3} ] }" + " ,f3:{buckets:[ {val:A,count:2}, {val:B,count:3} ] }" + " ,f4:{buckets:[ {val:B,count:3}, {val:A,count:2} ] }" + "}");
    // test tiebreaks when sorting by count
    client.testJQ(params(p, "q", "id:1 id:6", "json.facet", "{f1:{terms:{${terms} field:'${cat_s}', sort:'count desc' }  }" + "           , f2:{terms:{${terms} field:'${cat_s}', sort:'count asc'  }  }" + "}"), "facets=={ count:2 " + " ,f1:{buckets:[ {val:A,count:1}, {val:B,count:1} ] }" + " ,f2:{buckets:[ {val:A,count:1}, {val:B,count:1} ] }" + "}");
    // terms facet with nested query facet
    client.testJQ(params(p, "q", "*:*", "json.facet", "{cat:{terms:{${terms} field:'${cat_s}', facet:{nj:{query:'${where_s}:NJ'}}    }   }} }"), "facets=={ 'count':6, " + "'cat':{ 'buckets':[{ 'val':'B', 'count':3, 'nj':{ 'count':2}}, { 'val':'A', 'count':2, 'nj':{ 'count':1}}]} }");
    // terms facet with nested query facet on subset
    client.testJQ(params(p, "q", "id:(2 5 4)", "json.facet", "{cat:{terms:{${terms} field:'${cat_s}', facet:{nj:{query:'${where_s}:NJ'}}    }   }} }"), "facets=={ 'count':3, " + "'cat':{ 'buckets':[{ 'val':'B', 'count':2, 'nj':{ 'count':2}}, { 'val':'A', 'count':1, 'nj':{ 'count':1}}]} }");
    // test prefix
    client.testJQ(params(p, "q", "*:*", // even with mincount=0, we should only see buckets with the prefix
    "json.facet", // even with mincount=0, we should only see buckets with the prefix
    "{f1:{terms:{${terms} field:${super_s}, prefix:s, mincount:0 }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[{val:spiderman, count:1}, {val:superman, count:1}]} } ");
    // test prefix that doesn't exist
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${super_s}, prefix:ttt, mincount:0 }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[]} } ");
    // test prefix that doesn't exist at start
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${super_s}, prefix:aaaaaa, mincount:0 }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[]} } ");
    // test prefix that doesn't exist at end
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${super_s}, prefix:zzzzzz, mincount:0 }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[]} } ");
    // test prefix on where field
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " f1:{${terms} type:terms, field:${where_s}, prefix:N  }" + ",f2:{${terms} type:terms, field:${where_s}, prefix:NY }" + ",f3:{${terms} type:terms, field:${where_s}, prefix:NJ }" + "}"), "facets=={ 'count':6 " + ",f1:{ 'buckets':[ {val:NJ,count:3}, {val:NY,count:2} ]}" + ",f2:{ 'buckets':[ {val:NY,count:2} ]}" + ",f3:{ 'buckets':[ {val:NJ,count:3} ]}" + " } ");
    // test prefix on real multi-valued field
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " f1:{${terms} type:terms, field:${multi_ss}, prefix:A  }" + ",f2:{${terms} type:terms, field:${multi_ss}, prefix:z }" + ",f3:{${terms} type:terms, field:${multi_ss}, prefix:aa }" + ",f4:{${terms} type:terms, field:${multi_ss}, prefix:bb }" + ",f5:{${terms} type:terms, field:${multi_ss}, prefix:a }" + ",f6:{${terms} type:terms, field:${multi_ss}, prefix:b }" + "}"), "facets=={ 'count':6 " + ",f1:{buckets:[]}" + ",f2:{buckets:[]}" + ",f3:{buckets:[]}" + ",f4:{buckets:[]}" + ",f5:{buckets:[ {val:a,count:3} ]}" + ",f6:{buckets:[ {val:b,count:3} ]}" + " } ");
    //
    // missing
    //
    // test missing w/ non-existent field
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${noexist}, missing:true}}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[], missing:{count:6} } } ");
    // test missing
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${sparse_s}, missing:true }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[{val:one, count:1}, {val:two, count:1}], missing:{count:4} } } ");
    // test missing with stats
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${sparse_s}, missing:true, facet:{x:'sum(${num_d})'}   }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[{val:one, count:1, x:4.0}, {val:two, count:1, x:11.0}], missing:{count:4, x:-12.0}   } } ");
    // test that the missing bucket is not affected by any prefix
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${sparse_s}, missing:true, prefix:on, facet:{x:'sum(${num_d})'}   }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[{val:one, count:1, x:4.0}], missing:{count:4, x:-12.0}   } } ");
    // test missing with prefix that doesn't exist
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms} field:${sparse_s}, missing:true, prefix:ppp, facet:{x:'sum(${num_d})'}   }}}"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[], missing:{count:4, x:-12.0}   } } ");
    // test numBuckets
    client.testJQ(params(p, "q", "*:*", "rows", "0", "facet", "true", // TODO: limit:0 produced an error
    "json.facet", // TODO: limit:0 produced an error
    "{f1:{terms:{${terms_method} field:${cat_s}, numBuckets:true, limit:1}}}"), "facets=={ 'count':6, " + "'f1':{ numBuckets:2, buckets:[{val:B, count:3}]} } ");
    // prefix should lower numBuckets
    client.testJQ(params(p, "q", "*:*", "rows", "0", "facet", "true", "json.facet", "{f1:{terms:{${terms} field:${cat_s}, numBuckets:true, prefix:B}}}"), "facets=={ 'count':6, " + "'f1':{ numBuckets:1, buckets:[{val:B, count:3}]} } ");
    // mincount should not lower numBuckets (since SOLR-10552)
    client.testJQ(params(p, "q", "*:*", "rows", "0", "facet", "true", "json.facet", "{f1:{terms:{${terms} field:${cat_s}, numBuckets:true, mincount:3}}}"), "facets=={ 'count':6, " + "'f1':{ numBuckets:2, buckets:[{val:B, count:3}]} } ");
    // basic range facet
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f:{type:range, field:${num_d}, start:-5, end:10, gap:5}}"), "facets=={count:6, f:{buckets:[ {val:-5.0,count:1}, {val:0.0,count:2}, {val:5.0,count:0} ] } }");
    // basic range facet on dates
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f:{type:range, field:${date}, start:'2001-01-01T00:00:00Z', end:'2003-01-01T00:00:00Z', gap:'+1YEAR'}}"), "facets=={count:6, f:{buckets:[ {val:'2001-01-01T00:00:00Z',count:2}, {val:'2002-01-01T00:00:00Z',count:2}] } }");
    // range facet on dates w/ stats
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f:{type:range, field:${date}, start:'2002-01-01T00:00:00Z', end:'2005-01-01T00:00:00Z', gap:'+1YEAR',   other:all, facet:{ x:'avg(${num_d})' } } }"), "facets=={count:6, f:{buckets:[ {val:'2002-01-01T00:00:00Z',count:2,x:-7.0}, {val:'2003-01-01T00:00:00Z',count:1,x:2.0}, {val:'2004-01-01T00:00:00Z',count:0}], before:{count:2,x:7.5}, after:{count:0}, between:{count:3,x:-4.0}  } }");
    // basic range facet with "include" params
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f:{range:{field:${num_d}, start:-5, end:10, gap:5, include:upper}}}"), "facets=={count:6, f:{buckets:[ {val:-5.0,count:0}, {val:0.0,count:2}, {val:5.0,count:0} ] } }");
    // range facet with sub facets and stats
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f:{range:{field:${num_d}, start:-5, end:10, gap:5,   facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}}   }}}"), "facets=={count:6, f:{buckets:[ {val:-5.0,count:1,x:-5.0,ny:{count:1}}, {val:0.0,count:2,x:5.0,ny:{count:1}}, {val:5.0,count:0 /* ,x:0.0,ny:{count:0} */ } ] } }");
    // range facet with sub facets and stats, with "other:all"
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f:{range:{field:${num_d}, start:-5, end:10, gap:5, other:all,   facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}}   }}}"), "facets=={count:6, f:{buckets:[ {val:-5.0,count:1,x:-5.0,ny:{count:1}}, {val:0.0,count:2,x:5.0,ny:{count:1}}, {val:5.0,count:0 /* ,x:0.0,ny:{count:0} */} ]" + ",before: {count:1,x:-5.0,ny:{count:0}}" + ",after:  {count:1,x:7.0, ny:{count:0}}" + ",between:{count:3,x:0.0, ny:{count:2}}" + " } }");
    // range facet with mincount
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f:{type:range, field:${num_d}, start:-5, end:10, gap:5, other:all, mincount:2,    facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}}   }}"), "facets=={count:6, f:{buckets:[  {val:0.0,count:2,x:5.0,ny:{count:1}} ]" + ",before: {count:1,x:-5.0,ny:{count:0}}" + ",after:  {count:1,x:7.0, ny:{count:0}}" + ",between:{count:3,x:0.0, ny:{count:2}}" + " } }");
    // range facet with sub facets and stats, with "other:all", on subset
    client.testJQ(params(p, "q", "id:(3 4 6)", "json.facet", "{f:{range:{field:${num_d}, start:-5, end:10, gap:5, other:all,   facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}}   }}}"), "facets=={count:3, f:{buckets:[ {val:-5.0,count:1,x:-5.0,ny:{count:1}}, {val:0.0,count:1,x:3.0,ny:{count:0}}, {val:5.0,count:0 /* ,x:0.0,ny:{count:0} */} ]" + ",before: {count:0 /* ,x:0.0,ny:{count:0} */ }" + ",after:  {count:0 /* ,x:0.0,ny:{count:0} */}" + ",between:{count:2,x:-2.0, ny:{count:1}}" + " } }");
    // stats at top level
    client.testJQ(params(p, "q", "*:*", "json.facet", "{ sum1:'sum(${num_d})', sumsq1:'sumsq(${num_d})', avg1:'avg(${num_d})', avg2:'avg(def(${num_d},0))', min1:'min(${num_d})', max1:'max(${num_d})'" + ", numwhere:'unique(${where_s})', unique_num_i:'unique(${num_i})', unique_num_d:'unique(${num_d})', unique_date:'unique(${date})'" + ", where_hll:'hll(${where_s})', hll_num_i:'hll(${num_i})', hll_num_d:'hll(${num_d})', hll_date:'hll(${date})'" + ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)', variance:'variance(${num_d})', stddev:'stddev(${num_d})' }"), "facets=={ 'count':6, " + "sum1:3.0, sumsq1:247.0, avg1:0.6, avg2:0.5, min1:-9.0, max1:11.0" + ", numwhere:2, unique_num_i:4, unique_num_d:5, unique_date:5" + ", where_hll:2, hll_num_i:4, hll_num_d:5, hll_date:5" + ", med:2.0, perc:[-9.0,2.0,11.0], variance:49.04, stddev:7.002856560004639}");
    // stats at top level, no matches
    client.testJQ(params(p, "q", "id:DOESNOTEXIST", "json.facet", "{ sum1:'sum(${num_d})', sumsq1:'sumsq(${num_d})', avg1:'avg(${num_d})', min1:'min(${num_d})', max1:'max(${num_d})'" + ", numwhere:'unique(${where_s})', unique_num_i:'unique(${num_i})', unique_num_d:'unique(${num_d})', unique_date:'unique(${date})'" + ", where_hll:'hll(${where_s})', hll_num_i:'hll(${num_i})', hll_num_d:'hll(${num_d})', hll_date:'hll(${date})'" + ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)', variance:'variance(${num_d})', stddev:'stddev(${num_d})' }"), "facets=={count:0 " + "\n//  ,sum1:0.0, sumsq1:0.0, avg1:0.0, min1:'NaN', max1:'NaN', numwhere:0 \n" + " }");
    // stats at top level, matching documents, but no values in the field
    // NOTE: this represents the current state of what is returned, not the ultimate desired state.
    client.testJQ(params(p, "q", "id:3", "json.facet", "{ sum1:'sum(${num_d})', sumsq1:'sumsq(${num_d})', avg1:'avg(${num_d})', min1:'min(${num_d})', max1:'max(${num_d})'" + ", numwhere:'unique(${where_s})', unique_num_i:'unique(${num_i})', unique_num_d:'unique(${num_d})', unique_date:'unique(${date})'" + ", where_hll:'hll(${where_s})', hll_num_i:'hll(${num_i})', hll_num_d:'hll(${num_d})', hll_date:'hll(${date})'" + ", med:'percentile(${num_d},50)', perc:'percentile(${num_d},0,50.0,100)', variance:'variance(${num_d})', stddev:'stddev(${num_d})' }"), "facets=={count:1 " + ",sum1:0.0," + " sumsq1:0.0," + // TODO: undesirable. omit?
    " avg1:0.0," + // TODO: undesirable. omit?
    " min1:'NaN'," + " max1:'NaN'," + " numwhere:0," + " unique_num_i:0," + " unique_num_d:0," + " unique_date:0," + " where_hll:0," + " hll_num_i:0," + " hll_num_d:0," + " hll_date:0," + " variance:0.0," + " stddev:0.0" + " }");
    //
    // tests on a multi-valued field with actual multiple values, just to ensure that we are
    // using a multi-valued method for the rest of the tests when appropriate.
    //
    client.testJQ(params(p, "q", "*:*", "json.facet", "{cat:{terms:{${terms} field:'${multi_ss}', facet:{nj:{query:'${where_s}:NJ'}}    }   }} }"), "facets=={ 'count':6, " + "'cat':{ 'buckets':[{ 'val':'a', 'count':3, 'nj':{ 'count':2}}, { 'val':'b', 'count':3, 'nj':{ 'count':2}}]} }");
    // test unique on multi-valued field
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + "x:'unique(${multi_ss})'" + ",y:{query:{q:'id:2', facet:{x:'unique(${multi_ss})'} }}  " + ",x2:'hll(${multi_ss})'" + ",y2:{query:{q:'id:2', facet:{x:'hll(${multi_ss})'} }}  " + " }"), "facets=={count:6 " + ",x:2" + // single document should yield 2 unique values
    ",y:{count:1, x:2}" + ",x2:2" + // single document should yield 2 unique values
    ",y2:{count:1, x:2}" + " }");
    // test allBucket multi-valued
    client.testJQ(params(p, "q", "*:*", "json.facet", "{x:{terms:{${terms} field:'${multi_ss}',allBuckets:true}}}"), "facets=={ count:6, " + "x:{ buckets:[{val:a, count:3}, {val:b, count:3}] , allBuckets:{count:6} } }");
    // allBuckets for multi-valued field with stats.  This can sometimes take a different path of adding complete DocSets to the Acc
    // also test limit:0
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " f0:{${terms_method} type:terms, field:${multi_ss}, allBuckets:true, limit:0} " + // offset with 0 limit
    ",f1:{${terms_method} type:terms, field:${multi_ss}, allBuckets:true, limit:0, offset:1} " + ",f2:{${terms_method} type:terms, field:${multi_ss}, allBuckets:true, limit:0, facet:{x:'sum(${num_d})'}, sort:'x desc' } " + ",f3:{${terms_method} type:terms, field:${multi_ss}, allBuckets:true, limit:0, missing:true, facet:{x:'sum(${num_d})', y:'avg(${num_d})'}, sort:'x desc' } " + "}"), "facets=={ 'count':6, " + " f0:{allBuckets:{count:6}, buckets:[]}" + ",f1:{allBuckets:{count:6}, buckets:[]}" + ",f2:{allBuckets:{count:6, x:-15.0}, buckets:[]} " + ",f3:{allBuckets:{count:6, x:-15.0, y:-2.5}, buckets:[], missing:{count:2, x:4.0, y:4.0} }} " + "}");
    // allBuckets with numeric field with stats.
    // also test limit:0
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " f0:{${terms_method} type:terms, field:${num_i}, allBuckets:true, limit:0} " + // offset with 0 limit
    ",f1:{${terms_method} type:terms, field:${num_i}, allBuckets:true, limit:0, offset:1} " + ",f2:{${terms_method} type:terms, field:${num_i}, allBuckets:true, limit:0, facet:{x:'sum(${num_d})'}, sort:'x desc' } " + "}"), "facets=={ 'count':6, " + " f0:{allBuckets:{count:5}, buckets:[]}" + ",f1:{allBuckets:{count:5}, buckets:[]}" + ",f2:{allBuckets:{count:5, x:3.0}, buckets:[]} " + "}");
    //////////////////////////////////////////////////////////////////////////////////////////////////////////
    // test converting legacy facets
    // test mincount
    client.testJQ(params(p, "q", "*:*", // , "json.facet", "{f1:{terms:{field:'${cat_s}', mincount:3}}}"
    "facet", "true", "facet.version", "2", "facet.field", "{!key=f1}${cat_s}", "facet.mincount", "3"), "facets=={ 'count':6, " + "'f1':{  'buckets':[{ 'val':'B', 'count':3}]} } ");
    // test prefix
    client.testJQ(params(p, "q", "*:*", // , "json.facet", "{f1:{terms:{field:${super_s}, prefix:s, mincount:0 }}}"  // even with mincount=0, we should only see buckets with the prefix
    "facet", "true", "facet.version", "2", "facet.field", "{!key=f1}${super_s}", "facet.prefix", "s", "facet.mincount", "0"), "facets=={ 'count':6, " + "'f1':{ 'buckets':[{val:spiderman, count:1}, {val:superman, count:1}]} } ");
    // range facet with sub facets and stats
    client.testJQ(params(p, "q", "*:*", // , "json.facet", "{f:{range:{field:${num_d}, start:-5, end:10, gap:5,   facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}}   }}}"
    "facet", "true", "facet.version", "2", "facet.range", "{!key=f}${num_d}", "facet.range.start", "-5", "facet.range.end", "10", "facet.range.gap", "5", "f.f.facet.stat", "x:sum(${num_i})", "subfacet.f.query", "{!key=ny}${where_s}:NY"), "facets=={count:6, f:{buckets:[ {val:-5.0,count:1,x:-5.0,ny:{count:1}}, {val:0.0,count:2,x:5.0,ny:{count:1}}, {val:5.0,count:0 /* ,x:0.0,ny:{count:0} */ } ] } }");
    // test sorting by stat
    client.testJQ(params(p, "q", "*:*", //    " , f2:{terms:{field:'${cat_s}', sort:'n1 asc', facet:{n1:'sum(${num_d})'}  }} }"
    "facet", "true", "facet.version", "2", "facet.field", "{!key=f1}${cat_s}", "f.f1.facet.sort", "n1 desc", "facet.stat", "n1:sum(${num_d})", "facet.field", "{!key=f2}${cat_s}", "f.f1.facet.sort", "n1 asc"), "facets=={ 'count':6, " + "  f1:{  'buckets':[{ val:'A', count:2, n1:6.0 }, { val:'B', count:3, n1:-3.0}]}" + ", f2:{  'buckets':[{ val:'B', count:3, n1:-3.0}, { val:'A', count:2, n1:6.0 }]} }");
    // range facet with sub facets and stats, with "other:all", on subset
    client.testJQ(params(p, "q", "id:(3 4 6)", //, "json.facet", "{f:{range:{field:${num_d}, start:-5, end:10, gap:5, other:all,   facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}}   }}}"
    "facet", "true", "facet.version", "2", "facet.range", "{!key=f}${num_d}", "facet.range.start", "-5", "facet.range.end", "10", "facet.range.gap", "5", "f.f.facet.stat", "x:sum(${num_i})", "subfacet.f.query", "{!key=ny}${where_s}:NY", "facet.range.other", "all"), "facets=={count:3, f:{buckets:[ {val:-5.0,count:1,x:-5.0,ny:{count:1}}, {val:0.0,count:1,x:3.0,ny:{count:0}}, {val:5.0,count:0 /* ,x:0.0,ny:{count:0} */} ]" + ",before: {count:0 /* ,x:0.0,ny:{count:0} */ }" + ",after:  {count:0 /* ,x:0.0,ny:{count:0} */}" + ",between:{count:2,x:-2.0, ny:{count:1}}" + " } }");
    ////////////////////////////////////////////////////////////////////////////////////////////
    // multi-select / exclude tagged filters via excludeTags
    ////////////////////////////////////////////////////////////////////////////////////////////
    // test uncached multi-select (see SOLR-8496)
    client.testJQ(params(p, "q", "{!cache=false}*:*", "fq", "{!tag=doc3,allfilt}-id:3", "json.facet", "{" + "f1:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:doc3} }  " + "}"), "facets=={ count:5, " + " f1:{ buckets:[ {val:B, count:3}, {val:A, count:2} ]  }" + "}");
    // test sub-facets of  empty buckets with domain filter exclusions (canProduceFromEmpty) (see SOLR-9519)
    client.testJQ(params(p, "q", "*:*", "fq", "{!tag=doc3}id:non-exist", "fq", "{!tag=CATA}${cat_s}:A", "json.facet", "{" + "f1:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:doc3} }  " + // nested under query
    ",q1 :{type:query, q:'*:*', facet:{ f1:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:doc3} } }  }  " + // nested under query, make sure id:4 filter still applies
    ",q1a:{type:query, q:'id:4', facet:{ f1:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:doc3} } }  }  " + // nested under range, make sure range constraints still apply
    ",r1 :{type:range, field:${num_d}, start:0, gap:3, end:5,  facet:{ f1:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:doc3} } }  }  " + // domain filter doesn't widen, so f2 should not appear.
    ",f2:{${terms} type:terms, field:${cat_s}, domain:{filter:'*:*'} }  " + "}"), "facets=={ count:0, " + " f1:{ buckets:[ {val:A, count:2} ]  }" + ",q1:{ count:0, f1:{buckets:[{val:A, count:2}]} }" + ",q1a:{ count:0, f1:{buckets:[{val:A, count:1}]} }" + ",r1:{ buckets:[ {val:0.0,count:0,f1:{buckets:[{val:A, count:1}]}}, {val:3.0,count:0,f1:{buckets:[{val:A, count:1}]}} ]  }" + "}");
    // nested query facets on subset (with excludeTags)
    client.testJQ(params(p, "q", "*:*", "fq", "{!tag=abc}id:(2 3)", "json.facet", "{ processEmpty:true," + " f1:{query:{q:'${cat_s}:B', facet:{nj:{query:'${where_s}:NJ'}, ny:{query:'${where_s}:NY'}} , excludeTags:[xyz,qaz]}}" + ",f2:{query:{q:'${cat_s}:B', facet:{nj:{query:'${where_s}:NJ'}, ny:{query:'${where_s}:NY'}} , excludeTags:abc }}" + ",f3:{query:{q:'${cat_s}:B', facet:{nj:{query:'${where_s}:NJ'}, ny:{query:'${where_s}:NY'}} , excludeTags:'xyz,abc,qaz' }}" + ",f4:{query:{q:'${cat_s}:B', facet:{nj:{query:'${where_s}:NJ'}, ny:{query:'${where_s}:NY'}} , excludeTags:[xyz , abc , qaz] }}" + // this is repeated, but it did fail when a single context was shared among sub-facets
    ",f5:{query:{q:'${cat_s}:B', facet:{nj:{query:'${where_s}:NJ'}, ny:{query:'${where_s}:NY'}} , excludeTags:[xyz,qaz]}}" + // exclude in a sub-facet
    ",f6:{query:{q:'${cat_s}:B', facet:{processEmpty:true, nj:{query:'${where_s}:NJ'}, ny:{ type:query, q:'${where_s}:NY', excludeTags:abc}}  }}" + // exclude in a sub-facet that doesn't match
    ",f7:{query:{q:'${cat_s}:B', facet:{processEmpty:true, nj:{query:'${where_s}:NJ'}, ny:{ type:query, q:'${where_s}:NY', excludeTags:xyz}}  }}" + "}"), "facets=={ 'count':2, " + " 'f1':{'count':1, 'nj':{'count':1}, 'ny':{'count':0}}" + ",'f2':{'count':3, 'nj':{'count':2}, 'ny':{'count':1}}" + ",'f3':{'count':3, 'nj':{'count':2}, 'ny':{'count':1}}" + ",'f4':{'count':3, 'nj':{'count':2}, 'ny':{'count':1}}" + ",'f5':{'count':1, 'nj':{'count':1}, 'ny':{'count':0}}" + ",'f6':{'count':1, 'nj':{'count':1}, 'ny':{'count':1}}" + ",'f7':{'count':1, 'nj':{'count':1}, 'ny':{'count':0}}" + "}");
    // terms facet with nested query facet (with excludeTags, using new format inside domain:{})
    client.testJQ(params(p, "q", "{!cache=false}*:*", "fq", "{!tag=doc6,allfilt}-id:6", "fq", "{!tag=doc3,allfilt}-id:3", "json.facet", "{processEmpty:true, " + " f0:{${terms} type:terms, field:${cat_s},                                    facet:{nj:{query:'${where_s}:NJ'}} }  " + ",f1:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:doc3},   missing:true,  facet:{nj:{query:'${where_s}:NJ'}} }  " + ",f2:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:allfilt},missing:true,  facet:{nj:{query:'${where_s}:NJ'}} }  " + ",f3:{${terms} type:terms, field:${cat_s}, domain:{excludeTags:doc6},   missing:true,  facet:{nj:{query:'${where_s}:NJ'}} }  " + "}"), "facets=={ count:4, " + " f0:{ buckets:[ {val:A, count:2, nj:{ count:1}}, {val:B, count:2, nj:{count:2}} ] }" + ",f1:{ buckets:[ {val:A, count:2, nj:{ count:1}}, {val:B, count:2, nj:{count:2}} ] , missing:{count:1,nj:{count:0}} }" + ",f2:{ buckets:[ {val:B, count:3, nj:{ count:2}}, {val:A, count:2, nj:{count:1}} ] , missing:{count:1,nj:{count:0}} }" + ",f3:{ buckets:[ {val:B, count:3, nj:{ count:2}}, {val:A, count:2, nj:{count:1}} ] , missing:{count:0} }" + "}");
    // range facet with sub facets and stats, with "other:all" (with excludeTags)
    client.testJQ(params(p, "q", "*:*", "fq", "{!tag=doc6,allfilt}-id:6", "fq", "{!tag=doc3,allfilt}-id:3", "json.facet", "{processEmpty:true " + ", f1:{type:range, field:${num_d}, start:-5, end:10, gap:5, other:all,   facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}} , domain:{excludeTags:allfilt} }" + ", f2:{type:range, field:${num_d}, start:-5, end:10, gap:5, other:all,   facet:{ x:'sum(${num_i})', ny:{query:'${where_s}:NY'}}  }" + "}"), "facets=={count:4" + ",f1:{buckets:[ {val:-5.0,count:1,x:-5.0,ny:{count:1}}, {val:0.0,count:2,x:5.0,ny:{count:1}}, {val:5.0,count:0} ]" + ",before: {count:1,x:-5.0,ny:{count:0}}" + ",after:  {count:1,x:7.0, ny:{count:0}}" + ",between:{count:3,x:0.0, ny:{count:2}} }" + ",f2:{buckets:[ {val:-5.0,count:0}, {val:0.0,count:2,x:5.0,ny:{count:1}}, {val:5.0,count:0} ]" + ",before: {count:1,x:-5.0,ny:{count:0}}" + ",after:  {count:1,x:7.0, ny:{count:0}}" + ",between:{count:2,x:5.0, ny:{count:1}} }" + "}");
    //
    // facet on numbers
    //
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " f1:{${terms}  type:field, field:${num_i} }" + ",f2:{${terms}  type:field, field:${num_i}, sort:'count asc' }" + ",f3:{${terms}  type:field, field:${num_i}, sort:'index asc' }" + ",f4:{${terms}  type:field, field:${num_i}, sort:'index desc' }" + ",f5:{${terms}  type:field, field:${num_i}, sort:'index desc', limit:1, missing:true, allBuckets:true, numBuckets:true }" + // mincount should not lower numbuckets (since SOLR-10552)
    ",f6:{${terms}  type:field, field:${num_i}, sort:'index desc', mincount:2, numBuckets:true }" + // test offset
    ",f7:{${terms}  type:field, field:${num_i}, sort:'index desc', offset:2, numBuckets:true }" + // test high offset
    ",f8:{${terms}  type:field, field:${num_i}, sort:'index desc', offset:100, numBuckets:true }" + // test stats
    ",f9:{${terms}  type:field, field:${num_i}, sort:'x desc', facet:{x:'avg(${num_d})'}, missing:true, allBuckets:true, numBuckets:true }" + // test subfacets
    ",f10:{${terms}  type:field, field:${num_i}, facet:{a:{query:'${cat_s}:A'}}, missing:true, allBuckets:true, numBuckets:true }" + // test subfacet using unique on numeric field (this previously triggered a resizing bug)
    ",f11:{${terms}  type:field, field:${num_i}, facet:{a:'unique(${num_d})'} ,missing:true, allBuckets:true, sort:'a desc' }" + "}"), "facets=={count:6 " + ",f1:{ buckets:[{val:-5,count:2},{val:2,count:1},{val:3,count:1},{val:7,count:1} ] } " + ",f2:{ buckets:[{val:2,count:1},{val:3,count:1},{val:7,count:1},{val:-5,count:2} ] } " + ",f3:{ buckets:[{val:-5,count:2},{val:2,count:1},{val:3,count:1},{val:7,count:1} ] } " + ",f4:{ buckets:[{val:7,count:1},{val:3,count:1},{val:2,count:1},{val:-5,count:2} ] } " + ",f5:{ buckets:[{val:7,count:1}]   , numBuckets:4, allBuckets:{count:5}, missing:{count:1}  } " + ",f6:{ buckets:[{val:-5,count:2}]  , numBuckets:4  } " + ",f7:{ buckets:[{val:2,count:1},{val:-5,count:2}] , numBuckets:4 } " + ",f8:{ buckets:[] , numBuckets:4 } " + // TODO: should missing exclude "x" because no values were collected?
    ",f9:{ buckets:[{val:7,count:1,x:11.0},{val:2,count:1,x:4.0},{val:3,count:1,x:2.0},{val:-5,count:2,x:-7.0} ],  numBuckets:4, allBuckets:{count:5,x:0.6},missing:{count:1,x:0.0} } " + ",f10:{ buckets:[{val:-5,count:2,a:{count:0}},{val:2,count:1,a:{count:1}},{val:3,count:1,a:{count:1}},{val:7,count:1,a:{count:0}} ],  numBuckets:4, allBuckets:{count:5},missing:{count:1,a:{count:0}} } " + ",f11:{ buckets:[{val:-5,count:2,a:2},{val:2,count:1,a:1},{val:3,count:1,a:1},{val:7,count:1,a:1} ] , missing:{count:1,a:0} , allBuckets:{count:5,a:5}  } " + "}");
    // facet on a float field - shares same code with integers/longs currently, so we only need to test labels/sorting
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " f1:{${terms}  type:field, field:${num_d} }" + ",f2:{${terms}  type:field, field:${num_d}, sort:'index desc' }" + "}"), "facets=={count:6 " + ",f1:{ buckets:[{val:-9.0,count:1},{val:-5.0,count:1},{val:2.0,count:1},{val:4.0,count:1},{val:11.0,count:1} ] } " + ",f2:{ buckets:[{val:11.0,count:1},{val:4.0,count:1},{val:2.0,count:1},{val:-5.0,count:1},{val:-9.0,count:1} ] } " + "}");
    // test 0, min/max int
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + " u : 'unique(${Z_num_i})'" + ", f1:{${terms}  type:field, field:${Z_num_i} }" + "}"), "facets=={count:6 " + ",u:3" + ",f1:{ buckets:[{val:" + Integer.MIN_VALUE + ",count:1},{val:0,count:1},{val:" + Integer.MAX_VALUE + ",count:1}]} " + "}");
    // multi-valued integer
    client.testJQ(params(p, "q", "*:*", "json.facet", "{ " + " c1:'unique(${num_is})', c2:'hll(${num_is})'" + ",f1:{${terms} type:terms, field:${num_is} }  " + "}"), "facets=={ count:6 " + ", c1:5, c2:5" + ", f1:{ buckets:[ {val:-1,count:2},{val:0,count:2},{val:3,count:2},{val:-5,count:1},{val:2,count:1}  ] } " + "} ");
    // multi-valued float
    client.testJQ(params(p, "q", "*:*", "json.facet", "{ " + " c1:'unique(${num_fs})', c2:'hll(${num_fs})'" + ",f1:{${terms} type:terms, field:${num_fs} }  " + "}"), "facets=={ count:6 " + ", c1:5, c2:5" + ", f1:{ buckets:[ {val:-1.5,count:2},{val:0.0,count:2},{val:3.0,count:2},{val:-5.0,count:1},{val:2.0,count:1}  ] } " + "} ");
    client.testJQ(params(p, "q", "*:*", "json.facet", "{" + // "cat0:{type:terms, field:${cat_s}, sort:'count desc', limit:1, overrequest:0}" +  // overrequest=0 test needs predictable layout
    "cat1:{type:terms, field:${cat_s}, sort:'count desc', limit:1, overrequest:1}" + // -1 is default overrequest
    ",catDef:{type:terms, field:${cat_s}, sort:'count desc', limit:1, overrequest:-1}" + // make sure overflows don't mess us up
    ",catBig:{type:terms, field:${cat_s}, sort:'count desc', offset:1, limit:2147483647, overrequest:2147483647}" + "}"), "facets=={ count:6" + // ", cat0:{ buckets:[ {val:B,count:3} ] }"
    ", cat1:{ buckets:[ {val:B,count:3} ] }" + ", catDef:{ buckets:[ {val:B,count:3} ] }" + ", catBig:{ buckets:[ {val:A,count:2} ] }" + "}");
    // test filter
    client.testJQ(params(p, "q", "*:*", "myfilt", "${cat_s}:A", "ff", "-id:1", "ff", "-id:2", "json.facet", "{" + // empty filter list
    "t:{${terms} type:terms, field:${cat_s}, domain:{filter:[]} }" + ",t_filt:{${terms} type:terms, field:${cat_s}, domain:{filter:'${cat_s}:B'} }" + // test access to qparser and other query parameters
    ",t_filt2 :{${terms} type:terms, field:${cat_s}, domain:{filter:'{!query v=$myfilt}'} }" + // test filter via "param" type
    ",t_filt2a:{${terms} type:terms, field:${cat_s}, domain:{filter:{param:myfilt} } }" + ",t_filt3: {${terms} type:terms, field:${cat_s}, domain:{filter:['-id:1','-id:2']} }" + // test multi-valued query parameter
    ",t_filt3a:{${terms} type:terms, field:${cat_s}, domain:{filter:{param:ff}} }" + // also tests a top-level negative filter
    ",q:{type:query, q:'${cat_s}:B', domain:{filter:['-id:5']} }" + ",r:{type:range, field:${num_d}, start:-5, end:10, gap:5, domain:{filter:'-id:4'} }" + "}"), "facets=={ count:6, " + "t        :{ buckets:[ {val:B, count:3}, {val:A, count:2} ] }" + ",t_filt  :{ buckets:[ {val:B, count:3}] } " + ",t_filt2 :{ buckets:[ {val:A, count:2}] } " + ",t_filt2a:{ buckets:[ {val:A, count:2}] } " + ",t_filt3 :{ buckets:[ {val:B, count:2}, {val:A, count:1}] } " + ",t_filt3a:{ buckets:[ {val:B, count:2}, {val:A, count:1}] } " + ",q:{count:2}" + ",r:{buckets:[ {val:-5.0,count:1}, {val:0.0,count:1}, {val:5.0,count:0} ] }" + "}");
    // test acc reuse (i.e. reset() method).  This is normally used for stats that are not calculated in the first phase,
    // currently non-sorting stats.
    client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{type:terms, field:'${cat_s}', facet:{h:'hll(${where_s})' , u:'unique(${where_s})', mind:'min(${num_d})', maxd:'max(${num_d})', sumd:'sum(${num_d})', avgd:'avg(${num_d})', variance:'variance(${num_d})', stddev:'stddev(${num_d})'         }   }}"), "facets=={ 'count':6, " + "'f1':{  buckets:[{val:B, count:3, h:2, u:2, mind:-9.0, maxd:11.0, sumd:-3.0, avgd:-1.0, variance:74.66666666666667, stddev:8.640987597877148}," + "                 {val:A, count:2, h:2, u:2, mind:2.0, maxd:4.0, sumd:6.0, avgd:3.0, variance:1.0, stddev:1.0}] } } ");
    // test min/max of string field
    if (where_s.equals("where_s") || where_s.equals("where_sd")) {
        // supports only single valued currently...
        // make NY the only value in bucket A
        client.testJQ(params(p, "q", "*:* -(+${cat_s}:A +${where_s}:NJ)", "json.facet", "{" + "  f1:{type:terms, field:'${cat_s}', facet:{min:'min(${where_s})', max:'max(${where_s})'}   }" + ", f2:{type:terms, field:'${cat_s}', facet:{min:'min(${where_s})', max:'max(${where_s})'} , sort:'min desc'}" + ", f3:{type:terms, field:'${cat_s}', facet:{min:'min(${where_s})', max:'max(${where_s})'} , sort:'min asc'}" + ", f4:{type:terms, field:'${cat_s}', facet:{min:'min(${super_s})', max:'max(${super_s})'} , sort:'max asc'}" + ", f5:{type:terms, field:'${cat_s}', facet:{min:'min(${super_s})', max:'max(${super_s})'} , sort:'max desc'}" + "}"), "facets=={ count:5, " + " f1:{ buckets:[{val:B, count:3, min:NJ, max:NY}, {val:A, count:1, min:NY, max:NY}]}" + ",f2:{ buckets:[{val:A, count:1, min:NY, max:NY}, {val:B, count:3, min:NJ, max:NY}]}" + ",f3:{ buckets:[{val:B, count:3, min:NJ, max:NY}, {val:A, count:1, min:NY, max:NY}]}" + ",f4:{ buckets:[{val:B, count:3, min:batman, max:superman}, {val:A, count:1, min:zodiac, max:zodiac}]}" + ",f5:{ buckets:[{val:A, count:1, min:zodiac, max:zodiac}, {val:B, count:3, min:batman, max:superman}]}" + " } ");
    }
    ////////////////////////////////////////////////////////////////
    if (client.local()) {
        long creates, resets;
        // NOTE: these test the current implementation and may need to be adjusted to match future optimizations (such as calculating N buckets in parallel in the second phase)
        creates = DebugAgg.Acc.creates.get();
        resets = DebugAgg.Acc.resets.get();
        client.testJQ(params(p, "q", "*:*", // x should be deferred to 2nd phase
        "json.facet", // x should be deferred to 2nd phase
        "{f1:{terms:{${terms_method} field:${super_s}, limit:1, facet:{x:'debug()'}   }}}"), "facets=={ 'count':6, " + "f1:{  buckets:[{ val:batman, count:1, x:1}]} } ");
        assertEquals(1, DebugAgg.Acc.creates.get() - creates);
        assertTrue(DebugAgg.Acc.resets.get() - resets <= 1);
        // probably "1", but may be special slot for something.  As long as it's not cardinality of the field
        assertTrue(DebugAgg.Acc.last.numSlots <= 2);
        creates = DebugAgg.Acc.creates.get();
        resets = DebugAgg.Acc.resets.get();
        client.testJQ(params(p, "q", "*:*", // sorting by x... must be done all at once in first phase
        "json.facet", // sorting by x... must be done all at once in first phase
        "{f1:{terms:{${terms_method} field:${super_s}, limit:1, facet:{ x:'debug()'} , sort:'x asc'  }}}"), "facets=={ 'count':6, " + "f1:{  buckets:[{ val:batman, count:1, x:1}]}" + " } ");
        assertEquals(1, DebugAgg.Acc.creates.get() - creates);
        assertTrue(DebugAgg.Acc.resets.get() - resets == 0);
        // all slots should be done in a single shot. there may be more than 5 due to special slots or hashing.
        assertTrue(DebugAgg.Acc.last.numSlots >= 5);
        // When limit:-1, we should do most stats in first phase (SOLR-10634)
        creates = DebugAgg.Acc.creates.get();
        resets = DebugAgg.Acc.resets.get();
        client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms_method} field:${super_s}, limit:-1, facet:{x:'debug()'}  }}}"), "facets==");
        assertEquals(1, DebugAgg.Acc.creates.get() - creates);
        assertTrue(DebugAgg.Acc.resets.get() - resets == 0);
        // all slots should be done in a single shot. there may be more than 5 due to special slots or hashing.
        assertTrue(DebugAgg.Acc.last.numSlots >= 5);
        // Now for a numeric field
        // When limit:-1, we should do most stats in first phase (SOLR-10634)
        creates = DebugAgg.Acc.creates.get();
        resets = DebugAgg.Acc.resets.get();
        client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms_method} field:${num_d}, limit:-1, facet:{x:'debug()'}  }}}"), "facets==");
        assertEquals(1, DebugAgg.Acc.creates.get() - creates);
        assertTrue(DebugAgg.Acc.resets.get() - resets == 0);
        // all slots should be done in a single shot. there may be more than 5 due to special slots or hashing.
        assertTrue(DebugAgg.Acc.last.numSlots >= 5);
        // But if we need to calculate domains anyway, it probably makes sense to calculate most stats in the 2nd phase (along with sub-facets)
        creates = DebugAgg.Acc.creates.get();
        resets = DebugAgg.Acc.resets.get();
        client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms_method} field:${super_s}, limit:-1, facet:{ x:'debug()' , y:{terms:${where_s}}   }  }}}"), "facets==");
        assertEquals(1, DebugAgg.Acc.creates.get() - creates);
        assertTrue(DebugAgg.Acc.resets.get() - resets >= 4);
        // probably 1, but could be higher
        assertTrue(DebugAgg.Acc.last.numSlots <= 2);
        // Now with a numeric field
        // But if we need to calculate domains anyway, it probably makes sense to calculate most stats in the 2nd phase (along with sub-facets)
        creates = DebugAgg.Acc.creates.get();
        resets = DebugAgg.Acc.resets.get();
        client.testJQ(params(p, "q", "*:*", "json.facet", "{f1:{terms:{${terms_method} field:${num_d}, limit:-1, facet:{ x:'debug()' , y:{terms:${where_s}}   }  }}}"), "facets==");
        assertEquals(1, DebugAgg.Acc.creates.get() - creates);
        assertTrue(DebugAgg.Acc.resets.get() - resets >= 4);
        // probably 1, but could be higher
        assertTrue(DebugAgg.Acc.last.numSlots <= 2);
    }
//////////////////////////////////////////////////////////////// end phase testing
}
Also used : SolrInputDocument(org.apache.solr.common.SolrInputDocument) MacroExpander(org.apache.solr.request.macro.MacroExpander) SolrClient(org.apache.solr.client.solrj.SolrClient) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams)
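
The test above drives JSON Facet requests through the test harness (client.testJQ). For readers who want to send an equivalent request to a live Solr instance with plain SolrJ, here is a minimal sketch; the base URL, collection, and the cat/price field names are illustrative assumptions, not part of the test.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.util.NamedList;

public class JsonFacetSketch {
    public static void main(String[] args) throws Exception {
        // Assumed base URL and collection; adjust for your own setup.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
            SolrQuery query = new SolrQuery("*:*");
            query.setRows(0);
            // A terms facet with a nested stat, analogous to the facets exercised in the test.
            query.set("json.facet", "{cats:{type:terms, field:cat, facet:{avg_price:'avg(price)'}}}");
            QueryResponse rsp = client.query(query);
            // JSON Facet results come back under the top-level "facets" entry of the raw response.
            NamedList<?> facets = (NamedList<?>) rsp.getResponse().get("facets");
            System.out.println(facets);
        }
    }
}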

Example 47 with SolrClient

use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.

the class TestDistribIDF method testSimpleQuery.

@Test
public void testSimpleQuery() throws Exception {
    //3 shards. 3rd shard won't have any data.
    createCollection("onecollection", "conf1", ImplicitDocRouter.NAME);
    createCollection("onecollection_local", "conf2", ImplicitDocRouter.NAME);
    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", 1);
    doc.setField("cat", "football");
    doc.addField(ShardParams._ROUTE_, "a");
    solrCluster.getSolrClient().add("onecollection", doc);
    solrCluster.getSolrClient().add("onecollection_local", doc);
    doc = new SolrInputDocument();
    doc.setField("id", 2);
    doc.setField("cat", "football");
    doc.addField(ShardParams._ROUTE_, "b");
    solrCluster.getSolrClient().add("onecollection", doc);
    solrCluster.getSolrClient().add("onecollection_local", doc);
    int nDocs = TestUtil.nextInt(random(), 10, 100);
    for (int i = 0; i < nDocs; i++) {
        doc = new SolrInputDocument();
        doc.setField("id", 3 + i);
        String cat = TestUtil.randomSimpleString(random());
        if (!cat.equals("football")) {
            //Making sure no other document has the query term in it.
            doc.setField("cat", cat);
            if (rarely()) {
                //Put most documents in shard b so that 'football' becomes 'rare' in shard b
                doc.addField(ShardParams._ROUTE_, "a");
            } else {
                doc.addField(ShardParams._ROUTE_, "b");
            }
            solrCluster.getSolrClient().add("onecollection", doc);
            solrCluster.getSolrClient().add("onecollection_local", doc);
        }
    }
    solrCluster.getSolrClient().commit("onecollection");
    solrCluster.getSolrClient().commit("onecollection_local");
    //Test against all nodes
    for (JettySolrRunner jettySolrRunner : solrCluster.getJettySolrRunners()) {
        try (SolrClient solrClient = getHttpSolrClient(jettySolrRunner.getBaseUrl().toString())) {
            try (SolrClient solrClient_local = getHttpSolrClient(jettySolrRunner.getBaseUrl().toString())) {
                SolrQuery query = new SolrQuery("cat:football");
                query.setFields("*,score");
                QueryResponse queryResponse = solrClient.query("onecollection", query);
                assertEquals(2, queryResponse.getResults().getNumFound());
                float score1 = (float) queryResponse.getResults().get(0).get("score");
                float score2 = (float) queryResponse.getResults().get(1).get("score");
                assertEquals("Doc1 score=" + score1 + " Doc2 score=" + score2, 0, Float.compare(score1, score2));
                query = new SolrQuery("cat:football");
                query.setShowDebugInfo(true);
                query.setFields("*,score");
                queryResponse = solrClient_local.query("onecollection_local", query);
                assertEquals(2, queryResponse.getResults().getNumFound());
                assertEquals(2, queryResponse.getResults().get(0).get("id"));
                assertEquals(1, queryResponse.getResults().get(1).get("id"));
                float score1_local = (float) queryResponse.getResults().get(0).get("score");
                float score2_local = (float) queryResponse.getResults().get(1).get("score");
                assertEquals("Doc1 score=" + score1_local + " Doc2 score=" + score2_local, 1, Float.compare(score1_local, score2_local));
            }
        }
    }
}
Also used : SolrInputDocument(org.apache.solr.common.SolrInputDocument) SolrClient(org.apache.solr.client.solrj.SolrClient) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) SolrQuery(org.apache.solr.client.solrj.SolrQuery) Test(org.junit.Test)
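
This test indexes into an implicit-router collection and steers each document to a specific shard through the _route_ parameter (ShardParams._ROUTE_). A minimal sketch of that indexing pattern with a plain SolrClient follows; the base URL is a placeholder, and "onecollection" simply mirrors the collection name used in the test.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ShardParams;

public class RoutedAddSketch {
    public static void main(String[] args) throws Exception {
        // Assumed base URL; change to point at your own cluster node.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
            SolrInputDocument doc = new SolrInputDocument();
            doc.setField("id", 1);
            doc.setField("cat", "football");
            // With the implicit router, _route_ names the target shard directly.
            doc.addField(ShardParams._ROUTE_, "a");
            client.add("onecollection", doc);
            client.commit("onecollection");
        }
    }
}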

Example 48 with SolrClient

use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.

the class TestDistribIDF method testMultiCollectionQuery.

@Test
public void testMultiCollectionQuery() throws Exception {
    // collection1 and collection2 are collections which have distributed idf enabled
    // collection1_local and collection2_local don't have distributed idf available
    // Only one doc has cat:football in each collection
    // When doing queries across collections we want to test that the query takes into account
    // distributed idf for the collection=collection1,collection2 query.
    // The way we verify is that score should be the same when querying across collection1 and collection2
    // But should be different when querying across collection1_local and collection2_local
    // since the idf is calculated per shard
    createCollection("collection1", "conf1");
    createCollection("collection1_local", "conf2");
    createCollection("collection2", "conf1");
    createCollection("collection2_local", "conf2");
    addDocsRandomly();
    //Test against all nodes
    for (JettySolrRunner jettySolrRunner : solrCluster.getJettySolrRunners()) {
        try (SolrClient solrClient = getHttpSolrClient(jettySolrRunner.getBaseUrl().toString())) {
            try (SolrClient solrClient_local = getHttpSolrClient(jettySolrRunner.getBaseUrl().toString())) {
                SolrQuery query = new SolrQuery("cat:football");
                query.setFields("*,score").add("collection", "collection1,collection2");
                QueryResponse queryResponse = solrClient.query("collection1", query);
                assertEquals(2, queryResponse.getResults().getNumFound());
                float score1 = (float) queryResponse.getResults().get(0).get("score");
                float score2 = (float) queryResponse.getResults().get(1).get("score");
                assertEquals("Doc1 score=" + score1 + " Doc2 score=" + score2, 0, Float.compare(score1, score2));
                query = new SolrQuery("cat:football");
                query.setFields("*,score").add("collection", "collection1_local,collection2_local");
                queryResponse = solrClient_local.query("collection1_local", query);
                assertEquals(2, queryResponse.getResults().getNumFound());
                assertEquals(2, queryResponse.getResults().get(0).get("id"));
                assertEquals(1, queryResponse.getResults().get(1).get("id"));
                float score1_local = (float) queryResponse.getResults().get(0).get("score");
                float score2_local = (float) queryResponse.getResults().get(1).get("score");
                assertEquals("Doc1 score=" + score1_local + " Doc2 score=" + score2_local, 1, Float.compare(score1_local, score2_local));
            }
        }
    }
}
Also used : SolrClient(org.apache.solr.client.solrj.SolrClient) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) SolrQuery(org.apache.solr.client.solrj.SolrQuery) Test(org.junit.Test)
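
The key move in this test is adding a "collection" parameter so one request spans several collections, which is what makes the distributed-IDF comparison meaningful. A minimal sketch of that query pattern is shown below, with placeholder URL and collection names.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class CrossCollectionQuerySketch {
    public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
            SolrQuery query = new SolrQuery("cat:football");
            // Request the score and span both collections in a single query.
            query.setFields("*,score").add("collection", "collection1,collection2");
            QueryResponse rsp = client.query("collection1", query);
            rsp.getResults().forEach(doc ->
                    System.out.println(doc.get("id") + " score=" + doc.get("score")));
        }
    }
}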

Example 49 with SolrClient

use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.

the class TestDefaultStatsCache method dfQuery.

protected void dfQuery(Object... q) throws Exception {
    final ModifiableSolrParams params = new ModifiableSolrParams();
    for (int i = 0; i < q.length; i += 2) {
        params.add(q[i].toString(), q[i + 1].toString());
    }
    final QueryResponse controlRsp = controlClient.query(params);
    // query a random server
    params.set("shards", shards);
    int which = r.nextInt(clients.size());
    SolrClient client = clients.get(which);
    QueryResponse rsp = client.query(params);
    checkResponse(controlRsp, rsp);
}
Also used : SolrClient(org.apache.solr.client.solrj.SolrClient) QueryResponse(org.apache.solr.client.solrj.response.QueryResponse) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams)
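
The helper packs alternating name/value varargs into ModifiableSolrParams before comparing a control response against one from a randomly chosen shard client. A standalone sketch of just that packing step, with illustrative parameter values:

import org.apache.solr.common.params.ModifiableSolrParams;

public class ParamsFromVarargs {
    // Same packing idea as dfQuery: even positions are names, odd positions are values.
    static ModifiableSolrParams toParams(Object... q) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        for (int i = 0; i < q.length; i += 2) {
            params.add(q[i].toString(), q[i + 1].toString());
        }
        return params;
    }

    public static void main(String[] args) {
        ModifiableSolrParams params = toParams("q", "cat:football", "fl", "id,score", "rows", 10);
        System.out.println(params); // prints the packed params in query-string form
    }
}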

Example 50 with SolrClient

use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.

the class TestInPlaceUpdatesDistrib method reorderedDBQsResurrectionTest.

/* Test for a situation when a document requiring in-place update cannot be "resurrected"
   * when the original full indexed document has been deleted by an out of order DBQ.
   * Expected behaviour in this case should be to throw the replica into LIR (since this will
   * be rare). Here's an example of the situation:
        ADD(id=x, val=5, ver=1)
        UPD(id=x, val=10, ver = 2)
        DBQ(q=val:10, v=4)
        DV(id=x, val=5, ver=3)
   */
private void reorderedDBQsResurrectionTest() throws Exception {
    if (onlyLeaderIndexes) {
        log.info("RTG with DBQs are not working in tlog replicas");
        return;
    }
    clearIndex();
    commit();
    buildRandomIndex(0);
    // RTG straight from the index
    SolrDocument sdoc = LEADER.getById("0");
    //assertEquals(value, sdoc.get("inplace_updatable_float"));
    assertEquals("title0", sdoc.get("title_s"));
    long version0 = (long) sdoc.get("_version_");
    String field = "inplace_updatable_int";
    // put replica out of sync
    List<UpdateRequest> updates = new ArrayList<>();
    // full update
    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", field, 5, "_version_", version0 + 1));
    // inplace_updatable_float=101
    updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, field, 10, "_version_", version0 + 2));
    // inplace_updatable_float=101
    updates.add(simulatedUpdateRequest(version0 + 2, "id", 0, field, 5, "_version_", version0 + 3));
    // supposed to not delete anything
    updates.add(simulatedDeleteRequest(field + ":10", version0 + 4));
    // order the updates correctly for NONLEADER 1
    for (UpdateRequest update : updates) {
        log.info("Issuing well ordered update: " + update.getDocuments());
        NONLEADERS.get(1).request(update);
    }
    // Reordering needs to happen using parallel threads
    ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
    // re-order the last two updates for NONLEADER 0
    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
    Collections.swap(reorderedUpdates, 2, 3);
    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
    for (UpdateRequest update : reorderedUpdates) {
        // pretend as this update is coming from the other non-leader, so that
        // the resurrection can happen from there (instead of the leader)
        update.setParam(DistributedUpdateProcessor.DISTRIB_FROM, ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());
        AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
        updateResponses.add(threadpool.submit(task));
        // while we can't guarantee/trust what order the updates are executed in, since multiple threads
        // are involved, but we're trying to bias the thread scheduling to run them in the order submitted
        Thread.sleep(10);
    }
    threadpool.shutdown();
    assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
    int successful = 0;
    for (Future<UpdateResponse> resp : updateResponses) {
        try {
            UpdateResponse r = resp.get();
            if (r.getStatus() == 0) {
                successful++;
            }
        } catch (Exception ex) {
            if (!ex.getMessage().contains("Tried to fetch missing update" + " from the leader, but missing wasn't present at leader.")) {
                throw ex;
            }
        }
    }
    // All should succeed, i.e. no LIR
    assertEquals(updateResponses.size(), successful);
    log.info("Non leader 0: " + ((HttpSolrClient) NONLEADERS.get(0)).getBaseURL());
    log.info("Non leader 1: " + ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());
    SolrDocument doc0 = NONLEADERS.get(0).getById(String.valueOf(0), params("distrib", "false"));
    SolrDocument doc1 = NONLEADERS.get(1).getById(String.valueOf(0), params("distrib", "false"));
    log.info("Doc in both replica 0: " + doc0);
    log.info("Doc in both replica 1: " + doc1);
    // assert both replicas have same effect
    for (int i = 0; i < NONLEADERS.size(); i++) {
        // 0th is re-ordered replica, 1st is well-ordered replica
        SolrClient client = NONLEADERS.get(i);
        SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
        assertNotNull("Client: " + ((HttpSolrClient) client).getBaseURL(), doc);
        assertEquals("Client: " + ((HttpSolrClient) client).getBaseURL(), 5, doc.getFieldValue(field));
    }
    log.info("reorderedDBQsResurrectionTest: This test passed fine...");
    clearIndex();
    commit();
}
Also used : UpdateRequest(org.apache.solr.client.solrj.request.UpdateRequest) ArrayList(java.util.ArrayList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) SolrServerException(org.apache.solr.client.solrj.SolrServerException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) UpdateResponse(org.apache.solr.client.solrj.response.UpdateResponse) SolrDocument(org.apache.solr.common.SolrDocument) SolrClient(org.apache.solr.client.solrj.SolrClient) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future)
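
The test simulates reordered low-level updates; for reference, the normal client-side way to send the kind of partial update it exercises is an atomic update document, where the field value is a map naming the operation. A hedged sketch follows (collection and field names are assumptions; Solr executes it as a true in-place update only when the field is a non-stored, non-indexed docValues field).

import java.util.Collections;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;

public class AtomicUpdateSketch {
    public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "0");
            // "set" replaces the value; "inc" would add to it. Solr applies this in place
            // only when the field qualifies (docValues, not stored, not indexed).
            doc.addField("inplace_updatable_int", Collections.singletonMap("set", 5));
            client.add("collection1", doc);
            client.commit("collection1");
        }
    }
}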

Aggregations

SolrClient (org.apache.solr.client.solrj.SolrClient): 172
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 105
Test (org.junit.Test): 67
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 38
ArrayList (java.util.ArrayList): 36
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 31
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 28
QueryResponse (org.apache.solr.client.solrj.response.QueryResponse): 28
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 28
IOException (java.io.IOException): 24
NamedList (org.apache.solr.common.util.NamedList): 23
SolrServerException (org.apache.solr.client.solrj.SolrServerException): 18
SolrException (org.apache.solr.common.SolrException): 18
UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest): 17
Map (java.util.Map): 16
Replica (org.apache.solr.common.cloud.Replica): 16
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner): 15
QueryRequest (org.apache.solr.client.solrj.request.QueryRequest): 14
SolrDocument (org.apache.solr.common.SolrDocument): 14
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 13