Use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr by apache.
The class MissingSegmentRecoveryTest, method setup().
@Before
public void setup() throws SolrServerException, IOException {
  CollectionAdminRequest.createCollection(collection, "conf", 1, 2)
      .setMaxShardsPerNode(1)
      .process(cluster.getSolrClient());
  waitForState("Expected a collection with one shard and two replicas", collection, clusterShape(1, 2));
  cluster.getSolrClient().setDefaultCollection(collection);

  // Index ten trivial documents so segment files exist before the test runs.
  List<SolrInputDocument> docs = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", i);
    docs.add(doc);
  }
  cluster.getSolrClient().add(docs);
  cluster.getSolrClient().commit();

  // Record the shard leader and a random non-leader replica for the test to use.
  DocCollection state = getCollectionState(collection);
  leader = state.getLeader("shard1");
  replica = getRandomReplica(state.getSlice("shard1"), (r) -> leader != r);
}
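A minimal, self-contained sketch of the same pattern outside the test harness: SolrJ's add() and commit() declare SolrServerException alongside IOException, so callers either declare or handle both. The base URL, collection name, and class name below are hypothetical, not taken from the test.

// Sketch only: illustrates declaring SolrServerException on the caller,
// as setup() above does. URL and collection name are hypothetical.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;

public class IndexingSketch {
  public static void main(String[] args) throws SolrServerException, IOException {
    try (HttpSolrClient client =
        new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
      List<SolrInputDocument> docs = new ArrayList<>();
      for (int i = 0; i < 10; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", i);
        docs.add(doc);
      }
      client.add(docs);  // may throw SolrServerException on a server-side failure
      client.commit();   // likewise
    }
  }
}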
Use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr by apache.
The class UnloadDistributedZkTest, method testUnloadLotsOfCores().
private void testUnloadLotsOfCores() throws Exception {
  SolrClient client = clients.get(2);
  String url3 = getBaseUrl(client);
  try (final HttpSolrClient adminClient = getHttpSolrClient(url3)) {
    adminClient.setConnectionTimeout(15000);
    adminClient.setSoTimeout(60000);
    int cnt = atLeast(3);
    ThreadPoolExecutor executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new DefaultSolrThreadFactory("testExecutor"));
    try {
      // create the cores
      createCores(adminClient, executor, "multiunload", 2, cnt);
    } finally {
      ExecutorUtil.shutdownAndAwaitTermination(executor);
    }

    // Unload the same cores concurrently on a fresh executor.
    executor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 5,
        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new DefaultSolrThreadFactory("testExecutor"));
    try {
      for (int j = 0; j < cnt; j++) {
        final int freezeJ = j;
        executor.execute(() -> {
          Unload unloadCmd = new Unload(true);
          unloadCmd.setCoreName("multiunload" + freezeJ);
          try {
            adminClient.request(unloadCmd);
          } catch (SolrServerException | IOException e) {
            throw new RuntimeException(e);
          }
        });
        Thread.sleep(random().nextInt(50));
      }
    } finally {
      ExecutorUtil.shutdownAndAwaitTermination(executor);
    }
  }
}
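The lambda above wraps SolrServerException in a RuntimeException because Runnable.run() cannot declare checked exceptions. A standalone sketch of that pattern, with a hypothetical RequestTask class that is not part of the test:

// Sketch only: the wrap-and-rethrow pattern for checked SolrJ exceptions
// inside a Runnable. RequestTask is a hypothetical name.
import java.io.IOException;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;

final class RequestTask implements Runnable {
  private final SolrClient client;
  private final SolrRequest<?> request;

  RequestTask(SolrClient client, SolrRequest<?> request) {
    this.client = client;
    this.request = request;
  }

  @Override
  public void run() {
    try {
      client.request(request);  // declares SolrServerException and IOException
    } catch (SolrServerException | IOException e) {
      // Runnable.run() cannot declare checked exceptions, so wrap and rethrow;
      // the executor surfaces the RuntimeException when the task fails.
      throw new RuntimeException(e);
    }
  }
}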
Use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr by apache.
The class AbstractFullDistribZkTestBase, method showCounts().
public void showCounts() {
  Set<String> theShards = shardToJetty.keySet();
  for (String shard : theShards) {
    List<CloudJettyRunner> solrJetties = shardToJetty.get(shard);
    for (CloudJettyRunner cjetty : solrJetties) {
      ZkNodeProps props = cjetty.info;
      System.err.println("PROPS:" + props);
      try {
        // "tests" is just a tag that won't do anything except be echoed in logs
        SolrParams query = params("q", "*:*", "rows", "0", "distrib", "false",
            "tests", "checkShardConsistency");
        long num = cjetty.client.solrClient.query(query).getResults().getNumFound();
        System.err.println("DOCS:" + num);
      } catch (SolrServerException | SolrException | IOException e) {
        System.err.println("error contacting client: " + e.getMessage() + "\n");
        continue;
      }
      boolean live = false;
      String nodeName = props.getStr(ZkStateReader.NODE_NAME_PROP);
      ZkStateReader zkStateReader = cloudClient.getZkStateReader();
      if (zkStateReader.getClusterState().liveNodesContain(nodeName)) {
        live = true;
      }
      System.err.println(" live:" + live);
    }
  }
}
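Note the multi-catch: SolrException is unchecked, but catching it together with SolrServerException and IOException lets the loop log the failure and continue to the next replica instead of aborting the whole report. A minimal sketch of that tolerate-and-continue pattern, using a hypothetical CountReporter helper:

// Sketch only: per-client doc counts that skip unreachable nodes.
// CountReporter and its argument list are hypothetical.
import java.io.IOException;
import java.util.List;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.common.SolrException;

final class CountReporter {
  static void showCounts(List<SolrClient> clients) {
    for (SolrClient client : clients) {
      try {
        long num = client.query(new SolrQuery("*:*").setRows(0))
            .getResults().getNumFound();
        System.err.println("DOCS:" + num);
      } catch (SolrServerException | SolrException | IOException e) {
        // One bad node doesn't abort the report; log and move on.
        System.err.println("error contacting client: " + e.getMessage());
      }
    }
  }
}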
Use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr by apache.
The class TestCloudPivotFacet, method assertPivotCountsAreCorrect().
/**
 * Given some query params, executes the request against the cloudClient and
 * then walks the pivot facet values in the response, treating each one as a
 * filter query to assert the pivot counts are correct.
 */
private void assertPivotCountsAreCorrect(SolrParams baseParams, SolrParams pivotParams) throws SolrServerException {
  SolrParams initParams = SolrParams.wrapAppended(pivotParams, baseParams);
  log.info("Doing full run: {}", initParams);
  countNumFoundChecks = 0;
  NamedList<List<PivotField>> pivots = null;
  try {
    QueryResponse initResponse = cloudClient.query(initParams);
    pivots = initResponse.getFacetPivot();
    assertNotNull(initParams + " has null pivots?", pivots);
    assertEquals(initParams + " num pivots", initParams.getParams("facet.pivot").length, pivots.size());
  } catch (Exception e) {
    throw new RuntimeException("init query failed: " + initParams + ": " + e.getMessage(), e);
  }
  try {
    for (Map.Entry<String, List<PivotField>> pivot : pivots) {
      final String pivotKey = pivot.getKey();
      // :HACK: for counting the max possible pivot depth
      final int maxDepth = 1 + pivotKey.length() - pivotKey.replace(",", "").length();
      assertTraceOk(pivotKey, baseParams, pivot.getValue());
      for (PivotField constraint : pivot.getValue()) {
        int depth = assertPivotCountsAreCorrect(pivotKey, baseParams, constraint);
        // we can't assert that the depth reached is the same as the depth requested
        // because the fq and/or mincount may have pruned the tree too much
        assertTrue("went too deep: " + depth + ": " + pivotKey + " ==> " + pivot, depth <= maxDepth);
      }
    }
  } catch (AssertionError e) {
    throw new AssertionError(initParams + " ==> " + e.getMessage(), e);
  } finally {
    log.info("Ending full run (countNumFoundChecks={}): {}", countNumFoundChecks, initParams);
  }
}
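The init query deliberately converts any failure into a RuntimeException whose message carries the offending params, so a test failure log shows exactly which query died. A hypothetical helper distilling that wrap-with-context pattern (the name queryOrFail is illustrative):

// Sketch only: attach the failing params to the rethrown exception.
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.SolrParams;

final class QueryHelper {
  static QueryResponse queryOrFail(SolrClient client, SolrParams params) {
    try {
      return client.query(params);
    } catch (Exception e) {
      // The message names the exact params, so the log pinpoints the query.
      throw new RuntimeException("query failed: " + params + ": " + e.getMessage(), e);
    }
  }
}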
Use of org.apache.solr.client.solrj.SolrServerException in project lucene-solr by apache.
The class StoppableIndexingThread, method run().
@Override
public void run() {
  int i = 0;
  int numDone = 0;
  numDeletes = 0;
  numAdds = 0;
  while (!stop) {
    if (numCycles != -1 && numDone > numCycles) {
      break;
    }
    ++numDone;
    String id = this.id + "-" + i;
    ++i;
    boolean addFailed = false;

    // Randomly delete a previously-added document about half the time.
    if (doDeletes && AbstractFullDistribZkTestBase.random().nextBoolean() && deletes.size() > 0) {
      String deleteId = deletes.remove(0);
      try {
        numDeletes++;
        if (controlClient != null) {
          UpdateRequest req = new UpdateRequest();
          req.deleteById(deleteId);
          req.setParam("CONTROL", "TRUE");
          req.process(controlClient);
        }
        cloudClient.deleteById(deleteId);
      } catch (Exception e) {
        System.err.println("REQUEST FAILED for id=" + deleteId);
        e.printStackTrace();
        if (e instanceof SolrServerException) {
          System.err.println("ROOT CAUSE for id=" + deleteId);
          ((SolrServerException) e).getRootCause().printStackTrace();
        }
        deleteFails.add(deleteId);
      }
    }

    try {
      numAdds++;
      SolrInputDocument doc = new SolrInputDocument();
      addFields(doc, "id", id, i1, 50, t1, "to come to the aid of their country.");
      addFields(doc, "rnd_b", true);
      docs.add(doc);
      if (docs.size() >= batchSize) {
        indexDocs(docs);
        docs.clear();
      }
    } catch (Exception e) {
      addFailed = true;
      System.err.println("REQUEST FAILED for id=" + id);
      e.printStackTrace();
      if (e instanceof SolrServerException) {
        System.err.println("ROOT CAUSE for id=" + id);
        ((SolrServerException) e).getRootCause().printStackTrace();
      }
      addFails.add(id);
    }

    if (!addFailed && doDeletes && AbstractFullDistribZkTestBase.random().nextBoolean()) {
      deletes.add(id);
    }

    if (docs.size() > 0 && pauseBetweenUpdates) {
      try {
        Thread.sleep(AbstractFullDistribZkTestBase.random().nextInt(500) + 50);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }
  System.err.println("added docs:" + numAdds + " with " + (addFails.size() + deleteFails.size()) + " fails" + " deletes:" + numDeletes);
}