Use of org.apache.solr.client.solrj.SolrClient in project nutch by apache.
The class SolrIndexWriter, method push():
public void push() throws IOException {
    if (inputDocs.size() > 0) {
        try {
            LOG.info("Indexing " + Integer.toString(inputDocs.size()) + "/"
                + Integer.toString(totalAdds) + " documents");
            LOG.info("Deleting " + Integer.toString(numDeletes) + " documents");
            numDeletes = 0;
            UpdateRequest req = new UpdateRequest();
            req.add(inputDocs);
            req.setAction(AbstractUpdateRequest.ACTION.OPTIMIZE, false, false);
            req.setParams(params);
            for (SolrClient solrClient : solrClients) {
                NamedList res = solrClient.request(req);
            }
        } catch (final SolrServerException e) {
            throw makeIOException(e);
        }
        inputDocs.clear();
    }
    if (deleteIds.size() > 0) {
        try {
            LOG.info("SolrIndexer: deleting " + Integer.toString(deleteIds.size()) + "/"
                + Integer.toString(totalDeletes) + " documents");
            for (SolrClient solrClient : solrClients) {
                solrClient.deleteById(deleteIds);
            }
        } catch (final SolrServerException e) {
            LOG.error("Error deleting: " + deleteIds);
            throw makeIOException(e);
        }
        deleteIds.clear();
    }
}
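The method batches the buffered documents into a single UpdateRequest and fans it out to each configured client, so the number of HTTP round trips stays independent of the batch size. A minimal, self-contained sketch of the same batched-update pattern (not Nutch's code; the URL, field names, and the use of COMMIT instead of Nutch's OPTIMIZE are illustrative assumptions, and HttpSolrClient.Builder assumes SolrJ 6.x or later):

import java.util.ArrayList;
import java.util.List;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class BatchedPushSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; point this at a real core or collection.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/nutch").build()) {
            List<SolrInputDocument> buffer = new ArrayList<>();
            for (int i = 0; i < 100; i++) {
                SolrInputDocument doc = new SolrInputDocument();
                doc.setField("id", "doc-" + i);
                doc.setField("title", "Example " + i);
                buffer.add(doc);
            }
            // Same shape as push(): one UpdateRequest carrying the whole buffer.
            UpdateRequest req = new UpdateRequest();
            req.add(buffer);
            req.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, false);
            req.process(client);
            buffer.clear();
        }
    }
}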
Use of org.apache.solr.client.solrj.SolrClient in project camel by apache.
The class SolrComponentTestSupport, method executeSolrQuery():
protected QueryResponse executeSolrQuery(String query) throws SolrServerException, IOException {
    SolrQuery solrQuery = new SolrQuery();
    solrQuery.setQuery(query);
    SolrClient solrServer = solrFixtures.getServer();
    return solrServer.query(solrQuery);
}
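Beyond setQuery(), SolrQuery also carries paging, field-list, and sort parameters. A small usage sketch (the helper and the field names are illustrative, not part of the Camel test; uses org.apache.solr.client.solrj.SolrQuery and org.apache.solr.client.solrj.response.QueryResponse):

// Hypothetical helper showing a richer query than the bare setQuery() above.
static QueryResponse pagedQuery(SolrClient client, String q) throws SolrServerException, IOException {
    SolrQuery solrQuery = new SolrQuery(q);       // constructor shortcut for setQuery(q)
    solrQuery.setRows(10);                        // page size
    solrQuery.setStart(0);                        // offset into the result set
    solrQuery.setFields("id", "title");           // placeholder field names
    solrQuery.setSort("id", SolrQuery.ORDER.asc);
    return client.query(solrQuery);
}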
Use of org.apache.solr.client.solrj.SolrClient in project spring-boot by spring-projects.
The class SolrHealthIndicatorTests, method solrIsDown():
@Test
public void solrIsDown() throws Exception {
    SolrClient solrClient = mock(SolrClient.class);
    given(solrClient.ping()).willThrow(new IOException("Connection failed"));
    SolrHealthIndicator healthIndicator = new SolrHealthIndicator(solrClient);
    Health health = healthIndicator.health();
    assertThat(health.getStatus()).isEqualTo(Status.DOWN);
    assertThat((String) health.getDetails().get("error")).contains("Connection failed");
}
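A companion happy-path test could stub ping() to return a successful SolrPingResponse instead of throwing. The following is a sketch under the assumption that the indicator maps a zero ping status to UP; the responseHeader wiring mirrors what SolrResponseBase.getStatus() reads (SolrPingResponse and NamedList come from SolrJ):

@Test
public void solrIsUp() throws Exception {
    SolrClient solrClient = mock(SolrClient.class);
    SolrPingResponse pingResponse = new SolrPingResponse();
    NamedList<Object> header = new NamedList<>();
    header.add("status", 0);                  // 0 == success
    NamedList<Object> body = new NamedList<>();
    body.add("responseHeader", header);
    pingResponse.setResponse(body);           // pingResponse.getStatus() now yields 0
    given(solrClient.ping()).willReturn(pingResponse);
    SolrHealthIndicator healthIndicator = new SolrHealthIndicator(solrClient);
    Health health = healthIndicator.health();
    assertThat(health.getStatus()).isEqualTo(Status.UP);
}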
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
The class TestTolerantUpdateProcessorCloud, method testSanity():
public void testSanity() throws Exception {
    // verify some basic sanity checking of indexing & querying across the collection
    // w/o using our custom update processor chain
    assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_ONE_PRE + "1"), f("foo_i", 42))).getStatus());
    assertEquals(0, CLOUD_CLIENT.add(doc(f("id", S_TWO_PRE + "2"), f("foo_i", 66))).getStatus());
    assertEquals(0, CLOUD_CLIENT.commit().getStatus());
    for (SolrClient c : Arrays.asList(S_ONE_LEADER_CLIENT, S_TWO_LEADER_CLIENT,
            S_ONE_NON_LEADER_CLIENT, S_TWO_NON_LEADER_CLIENT,
            NO_COLLECTION_CLIENT, CLOUD_CLIENT)) {
        assertQueryDocIds(c, true, S_ONE_PRE + "1", S_TWO_PRE + "2");
        assertQueryDocIds(c, false, "id_not_exists");
        // verify adding 2 broken docs causes a client exception
        try {
            UpdateResponse rsp = update(params(),
                doc(f("id", S_ONE_PRE + "X"), f("foo_i", "bogus_val_X")),
                doc(f("id", S_TWO_PRE + "Y"), f("foo_i", "bogus_val_Y"))).process(c);
            fail("did not get a top level exception when more than 10 docs failed: " + rsp.toString());
        } catch (SolrException e) {
            assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(),
                400, e.code());
        }
        // verify malformed deleteByQuerys fail
        try {
            UpdateResponse rsp = update(params()).deleteByQuery("foo_i:not_a_num").process(c);
            fail("sanity check for malformed DBQ didn't fail: " + rsp.toString());
        } catch (SolrException e) {
            assertEquals("not the expected DBQ failure: " + e.getMessage(), 400, e.code());
        }
        // verify opportunistic concurrency deletions fail as we expect when docs are / aren't present
        for (UpdateRequest r : new UpdateRequest[] {
                update(params("commit", "true")).deleteById(S_ONE_PRE + "1", -1L),
                update(params("commit", "true")).deleteById(S_TWO_PRE + "2", -1L),
                update(params("commit", "true")).deleteById("id_not_exists", 1L) }) {
            try {
                UpdateResponse rsp = r.process(c);
                fail("sanity check for opportunistic concurrency delete didn't fail: "
                    + r.toString() + " => " + rsp.toString());
            } catch (SolrException e) {
                assertEquals("not the expected opportunistic concurrency failure code: "
                    + r.toString() + " => " + e.getMessage(), 409, e.code());
            }
        }
    }
}
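The f(), doc(), update(), and params() helpers are defined elsewhere in the test class. The following are plausible shapes for the first three, written against SolrJ's SolrInputField, SolrInputDocument, ModifiableSolrParams, and UpdateRequest (assumptions for illustration, not the actual lucene-solr implementations):

// Plausible helper shapes (assumptions, not the real lucene-solr source):
static SolrInputField f(String name, Object value) {
    SolrInputField field = new SolrInputField(name);
    field.setValue(value);   // single-arg setValue per modern SolrJ
    return field;
}

static SolrInputDocument doc(SolrInputField... fields) {
    SolrInputDocument d = new SolrInputDocument();
    for (SolrInputField field : fields) {
        d.put(field.getName(), field);   // SolrInputDocument is a Map<String, SolrInputField>
    }
    return d;
}

static UpdateRequest update(SolrParams params, SolrInputDocument... docs) {
    UpdateRequest r = new UpdateRequest();
    r.setParams(new ModifiableSolrParams(params));
    for (SolrInputDocument d : docs) {
        r.add(d);
    }
    return r;
}

The doc() sketch exploits the fact that SolrInputDocument is a Map from field name to SolrInputField, so fully built fields can be dropped in directly.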
Use of org.apache.solr.client.solrj.SolrClient in project lucene-solr by apache.
The class TestTolerantUpdateProcessorRandomCloud, method testRandomUpdates():
public void testRandomUpdates() throws Exception {
    final int maxDocId = atLeast(10000);
    final BitSet expectedDocIds = new BitSet(maxDocId + 1);
    final int numIters = atLeast(50);
    for (int i = 0; i < numIters; i++) {
        log.info("BEGIN ITER #{}", i);
        final UpdateRequest req = update(params("maxErrors", "-1", "update.chain", "tolerant-chain-max-errors-10"));
        final int numCmds = TestUtil.nextInt(random(), 1, 20);
        final List<ExpectedErr> expectedErrors = new ArrayList<ExpectedErr>(numCmds);
        int expectedErrorsCount = 0;
        // it's ambiguous/confusing in which order mixed DELQ + ADD (or ADD and DELI for the same ID)
        // in the same request will be processed by various clients, so we keep things simple
        // and ensure that no single doc Id is affected by more than one command in the same request
        final BitSet docsAffectedThisRequest = new BitSet(maxDocId + 1);
        for (int cmdIter = 0; cmdIter < numCmds; cmdIter++) {
            if ((maxDocId / 2) < docsAffectedThisRequest.cardinality()) {
                // we're already mucking with more than half the docs in the index
                break;
            }
            final boolean causeError = random().nextBoolean();
            if (causeError) {
                expectedErrorsCount++;
            }
            if (random().nextBoolean()) {
                // add a doc
                String id = null;
                SolrInputDocument doc = null;
                if (causeError && (0 == TestUtil.nextInt(random(), 0, 21))) {
                    doc = doc(f("foo_s", "no unique key"));
                    expectedErrors.add(addErr("(unknown)"));
                } else {
                    final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                    docsAffectedThisRequest.set(id_i);
                    id = "id_" + id_i;
                    if (causeError) {
                        expectedErrors.add(addErr(id));
                    } else {
                        expectedDocIds.set(id_i);
                    }
                    final String val = causeError ? "bogus_val" : ("" + TestUtil.nextInt(random(), 42, 666));
                    doc = doc(f("id", id), f("id_i", id_i), f("foo_i", val));
                }
                req.add(doc);
                log.info("ADD: {} = {}", id, doc);
            } else {
                // delete something
                if (random().nextBoolean()) {
                    // delete by id
                    final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                    final String id = "id_" + id_i;
                    final boolean docExists = expectedDocIds.get(id_i);
                    docsAffectedThisRequest.set(id_i);
                    long versionConstraint = docExists ? 1 : -1;
                    if (causeError) {
                        versionConstraint = -1 * versionConstraint;
                        expectedErrors.add(delIErr(id));
                    } else {
                        // if doc exists it will legitimately be deleted
                        expectedDocIds.clear(id_i);
                    }
                    req.deleteById(id, versionConstraint);
                    log.info("DEL: {} = {}", id, causeError ? "ERR" : "OK");
                } else {
                    // delete by query
                    final String q;
                    if (causeError) {
                        // even though our DBQ is gibberish that's going to fail, record a docId as affected
                        // so that we don't generate the same random DBQ and get redundant errors
                        // (problematic because of how DUP forwarded DBQs have to have their errors deduped by TUP)
                        final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                        docsAffectedThisRequest.set(id_i);
                        q = "foo_i:[" + id_i + " TO ....giberish";
                        expectedErrors.add(delQErr(q));
                    } else {
                        // ensure our DBQ is only over a range of docs not already affected
                        // by any other cmds in this request
                        final int rangeAxis = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                        final int loBound = docsAffectedThisRequest.previousSetBit(rangeAxis);
                        final int hiBound = docsAffectedThisRequest.nextSetBit(rangeAxis);
                        final int lo = TestUtil.nextInt(random(), loBound + 1, rangeAxis);
                        final int hi = TestUtil.nextInt(random(), rangeAxis,
                            // bound might be negative if no set bits above axis
                            (hiBound < 0) ? maxDocId : hiBound - 1);
                        if (lo != hi) {
                            assert lo < hi : "lo=" + lo + " hi=" + hi;
                            // NOTE: clear & set are exclusive of hi, so we use "}" in the range query accordingly
                            q = "id_i:[" + lo + " TO " + hi + "}";
                            expectedDocIds.clear(lo, hi);
                            docsAffectedThisRequest.set(lo, hi);
                        } else {
                            // edge case: special case DBQ of one doc
                            assert (lo == rangeAxis && hi == rangeAxis) : "lo=" + lo + " axis=" + rangeAxis + " hi=" + hi;
                            // have to be inclusive of both ends
                            q = "id_i:[" + lo + " TO " + lo + "]";
                            expectedDocIds.clear(lo);
                            docsAffectedThisRequest.set(lo);
                        }
                    }
                    req.deleteByQuery(q);
                    log.info("DEL: {}", q);
                }
            }
        }
assertEquals("expected error count sanity check: " + req.toString(), expectedErrorsCount, expectedErrors.size());
final SolrClient client = random().nextBoolean() ? CLOUD_CLIENT : NODE_CLIENTS.get(TestUtil.nextInt(random(), 0, NODE_CLIENTS.size() - 1));
final UpdateResponse rsp = req.process(client);
assertUpdateTolerantErrors(client.toString() + " => " + expectedErrors.toString(), rsp, expectedErrors.toArray(new ExpectedErr[expectedErrors.size()]));
log.info("END ITER #{}, expecting #docs: {}", i, expectedDocIds.cardinality());
assertEquals("post update commit failed?", 0, CLOUD_CLIENT.commit().getStatus());
for (int j = 0; j < 5; j++) {
if (expectedDocIds.cardinality() == countDocs(CLOUD_CLIENT)) {
break;
}
log.info("sleeping to give searchers a chance to re-open #" + j);
Thread.sleep(200);
}
// check the index contents against our expectations
final BitSet actualDocIds = allDocs(CLOUD_CLIENT, maxDocId);
if (expectedDocIds.cardinality() != actualDocIds.cardinality()) {
log.error("cardinality mismatch: expected {} BUT actual {}", expectedDocIds.cardinality(), actualDocIds.cardinality());
}
final BitSet x = (BitSet) actualDocIds.clone();
x.xor(expectedDocIds);
for (int b = x.nextSetBit(0); 0 <= b; b = x.nextSetBit(b + 1)) {
final boolean expectedBit = expectedDocIds.get(b);
final boolean actualBit = actualDocIds.get(b);
log.error("bit #" + b + " mismatch: expected {} BUT actual {}", expectedBit, actualBit);
}
assertEquals(x.cardinality() + " mismatched bits", expectedDocIds.cardinality(), actualDocIds.cardinality());
}
}
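randomUnsetBit() is the workhorse that keeps commands in one request from colliding on a doc id: it returns a random index in [0, max] whose bit is clear. A plausible implementation (an assumption; the real helper lives elsewhere in the test class) picks a uniform candidate and walks forward, wrapping at max, to the first clear bit:

// Plausible shape of randomUnsetBit (assumption, not the lucene-solr source):
public static int randomUnsetBit(Random r, BitSet bits, final int max) {
    assert bits.cardinality() <= max : "every bit in [0, max] is already set";
    int candidate = TestUtil.nextInt(r, 0, max);
    while (bits.get(candidate)) {
        candidate = (candidate + 1) % (max + 1);  // wrap around past max
    }
    return candidate;
}

The wrap-around walk is slightly biased toward bits just after dense runs, but for a randomized sanity test that bias is harmless.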