Example usage of org.apache.solr.client.solrj.response.UpdateResponse in the Apache lucene-solr project, taken from the class TestInPlaceUpdatesDistrib, method reorderedDBQsResurrectionTest.
/*
 * Test for a situation when a document requiring in-place update cannot be "resurrected"
 * when the original full indexed document has been deleted by an out of order DBQ
 * (delete-by-query). Expected behaviour in this case should be to throw the replica
 * into LIR (since this will be rare). Here's an example of the situation:
 *   ADD(id=x, val=5,  ver=1)
 *   UPD(id=x, val=10, ver=2)
 *   DBQ(q=val:10, v=4)        <-- arrives out of order, before ver=3
 *   DV (id=x, val=5,  ver=3)
 *
 * The test sends the updates well-ordered to NONLEADER 1, then sends the same updates
 * with the last two swapped to NONLEADER 0 (via parallel threads, to allow the reorder
 * to actually happen), and finally asserts that both replicas converge to the same
 * document state with no LIR.
 */
private void reorderedDBQsResurrectionTest() throws Exception {
if (onlyLeaderIndexes) {
log.info("RTG with DBQs are not working in tlog replicas");
return;
}
clearIndex();
commit();
buildRandomIndex(0);
// RTG straight from the index
SolrDocument sdoc = LEADER.getById("0");
//assertEquals(value, sdoc.get("inplace_updatable_float"));
assertEquals("title0", sdoc.get("title_s"));
long version0 = (long) sdoc.get("_version_");
String field = "inplace_updatable_int";
// put replica out of sync
List<UpdateRequest> updates = new ArrayList<>();
// full update
updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", field, 5, "_version_", version0 + 1));
// inplace_updatable_float=101
updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, field, 10, "_version_", version0 + 2));
// inplace_updatable_float=101
updates.add(simulatedUpdateRequest(version0 + 2, "id", 0, field, 5, "_version_", version0 + 3));
// supposed to not delete anything
updates.add(simulatedDeleteRequest(field + ":10", version0 + 4));
// order the updates correctly for NONLEADER 1
for (UpdateRequest update : updates) {
log.info("Issuing well ordered update: " + update.getDocuments());
NONLEADERS.get(1).request(update);
}
// Reordering needs to happen using parallel threads
ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
// re-order the last two updates for NONLEADER 0
List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
Collections.swap(reorderedUpdates, 2, 3);
List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
for (UpdateRequest update : reorderedUpdates) {
// pretend as this update is coming from the other non-leader, so that
// the resurrection can happen from there (instead of the leader)
update.setParam(DistributedUpdateProcessor.DISTRIB_FROM, ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());
AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
updateResponses.add(threadpool.submit(task));
// while we can't guarantee/trust what order the updates are executed in, since multiple threads
// are involved, but we're trying to bias the thread scheduling to run them in the order submitted
Thread.sleep(10);
}
threadpool.shutdown();
assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
int successful = 0;
for (Future<UpdateResponse> resp : updateResponses) {
try {
UpdateResponse r = resp.get();
if (r.getStatus() == 0) {
successful++;
}
} catch (Exception ex) {
// a "missing update at leader" failure is tolerated here; anything else fails the test
if (!ex.getMessage().contains("Tried to fetch missing update" + " from the leader, but missing wasn't present at leader.")) {
throw ex;
}
}
}
// All should succeed, i.e. no LIR
assertEquals(updateResponses.size(), successful);
log.info("Non leader 0: " + ((HttpSolrClient) NONLEADERS.get(0)).getBaseURL());
log.info("Non leader 1: " + ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());
SolrDocument doc0 = NONLEADERS.get(0).getById(String.valueOf(0), params("distrib", "false"));
SolrDocument doc1 = NONLEADERS.get(1).getById(String.valueOf(0), params("distrib", "false"));
log.info("Doc in both replica 0: " + doc0);
log.info("Doc in both replica 1: " + doc1);
// assert both replicas have same effect
for (int i = 0; i < NONLEADERS.size(); i++) {
// 0th is re-ordered replica, 1st is well-ordered replica
SolrClient client = NONLEADERS.get(i);
SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
assertNotNull("Client: " + ((HttpSolrClient) client).getBaseURL(), doc);
assertEquals("Client: " + ((HttpSolrClient) client).getBaseURL(), 5, doc.getFieldValue(field));
}
log.info("reorderedDBQsResurrectionTest: This test passed fine...");
clearIndex();
commit();
}
Example usage of org.apache.solr.client.solrj.response.UpdateResponse in the Apache lucene-solr project, taken from the class SolrExampleTests, method testExampleConfig.
/**
 * Smoke-tests the example configuration end to end: add/commit/optimize round trips,
 * querying by id, facet queries and facet fields, copying a query and adding a filter
 * query, round-tripping of echoed request parameters, and (when a Jetty instance is
 * available) the system-wide /admin/info/system handler.
 */
@Test
public void testExampleConfig() throws Exception {
SolrClient client = getSolrClient();
// Empty the database...
// delete everything!
client.deleteByQuery("*:*");
// Now add something...
SolrInputDocument doc = new SolrInputDocument();
String docID = "1112211111";
doc.addField("id", docID);
doc.addField("name", "my name!");
Assert.assertEquals(null, doc.getField("foo"));
Assert.assertTrue(doc.getField("name").getValue() != null);
UpdateResponse upres = client.add(doc);
// System.out.println( "ADD:"+upres.getResponse() );
Assert.assertEquals(0, upres.getStatus());
upres = client.commit(true, true);
// System.out.println( "COMMIT:"+upres.getResponse() );
Assert.assertEquals(0, upres.getStatus());
upres = client.optimize(true, true);
// System.out.println( "OPTIMIZE:"+upres.getResponse() );
Assert.assertEquals(0, upres.getStatus());
SolrQuery query = new SolrQuery();
query.setQuery("id:" + docID);
QueryResponse response = client.query(query);
Assert.assertEquals(docID, response.getResults().get(0).getFieldValue("id"));
// Now add a few docs for facet testing...
List<SolrInputDocument> docs = new ArrayList<>();
SolrInputDocument doc2 = new SolrInputDocument();
doc2.addField("id", "2");
doc2.addField("inStock", true);
doc2.addField("price", 2);
doc2.addField("timestamp_dt", new java.util.Date());
docs.add(doc2);
SolrInputDocument doc3 = new SolrInputDocument();
doc3.addField("id", "3");
doc3.addField("inStock", false);
doc3.addField("price", 3);
doc3.addField("timestamp_dt", new java.util.Date());
docs.add(doc3);
SolrInputDocument doc4 = new SolrInputDocument();
doc4.addField("id", "4");
doc4.addField("inStock", true);
doc4.addField("price", 4);
doc4.addField("timestamp_dt", new java.util.Date());
docs.add(doc4);
SolrInputDocument doc5 = new SolrInputDocument();
doc5.addField("id", "5");
doc5.addField("inStock", false);
doc5.addField("price", 5);
doc5.addField("timestamp_dt", new java.util.Date());
docs.add(doc5);
upres = client.add(docs);
// System.out.println( "ADD:"+upres.getResponse() );
Assert.assertEquals(0, upres.getStatus());
upres = client.commit(true, true);
// System.out.println( "COMMIT:"+upres.getResponse() );
Assert.assertEquals(0, upres.getStatus());
upres = client.optimize(true, true);
// System.out.println( "OPTIMIZE:"+upres.getResponse() );
Assert.assertEquals(0, upres.getStatus());
query = new SolrQuery("*:*");
query.addFacetQuery("price:[* TO 2]");
query.addFacetQuery("price:[2 TO 4]");
query.addFacetQuery("price:[5 TO *]");
query.addFacetField("inStock");
query.addFacetField("price");
query.addFacetField("timestamp_dt");
// removing a filter query that was never added should be a harmless no-op
query.removeFilterQuery("inStock:true");
response = client.query(query);
Assert.assertEquals(0, response.getStatus());
Assert.assertEquals(5, response.getResults().getNumFound());
Assert.assertEquals(3, response.getFacetQuery().size());
Assert.assertEquals(2, response.getFacetField("inStock").getValueCount());
Assert.assertEquals(4, response.getFacetField("price").getValueCount());
// test a second query, test making a copy of the main query
SolrQuery query2 = query.getCopy();
query2.addFilterQuery("inStock:true");
// the copy must own its filter-query array (not share it with the original)
Assert.assertFalse(query.getFilterQueries() == query2.getFilterQueries());
response = client.query(query2);
Assert.assertEquals(1, query2.getFilterQueries().length);
Assert.assertEquals(0, response.getStatus());
Assert.assertEquals(2, response.getResults().getNumFound());
for (SolrDocument outDoc : response.getResults()) {
assertEquals(true, outDoc.getFieldValue("inStock"));
}
// sanity check round tripping of params...
query = new SolrQuery("foo");
query.addFilterQuery("{!field f=inStock}true");
query.addFilterQuery("{!term f=name}hoss");
query.addFacetQuery("price:[* TO 2]");
query.addFacetQuery("price:[2 TO 4]");
response = client.query(query);
assertTrue("echoed params are not a NamedList: " + response.getResponseHeader().get("params").getClass(), response.getResponseHeader().get("params") instanceof NamedList);
NamedList echo = (NamedList) response.getResponseHeader().get("params");
List values = null;
assertEquals("foo", echo.get("q"));
assertTrue("echoed fq is not a List: " + echo.get("fq").getClass(), echo.get("fq") instanceof List);
values = (List) echo.get("fq");
Assert.assertEquals(2, values.size());
Assert.assertEquals("{!field f=inStock}true", values.get(0));
Assert.assertEquals("{!term f=name}hoss", values.get(1));
assertTrue("echoed facet.query is not a List: " + echo.get("facet.query").getClass(), echo.get("facet.query") instanceof List);
values = (List) echo.get("facet.query");
Assert.assertEquals(2, values.size());
Assert.assertEquals("price:[* TO 2]", values.get(0));
Assert.assertEquals("price:[2 TO 4]", values.get(1));
if (jetty != null) {
// check system wide system handler + "/admin/info/system"
String url = jetty.getBaseUrl().toString();
try (HttpSolrClient adminClient = getHttpSolrClient(url)) {
SolrQuery q = new SolrQuery();
q.set("qt", "/admin/info/system");
QueryResponse rsp = adminClient.query(q);
assertNotNull(rsp.getResponse().get("mode"));
assertNotNull(rsp.getResponse().get("lucene"));
}
}
}
Example usage of org.apache.solr.client.solrj.response.UpdateResponse in the Apache lucene-solr project, taken from the class BaseDistributedSearchTestCase, method indexDoc.
/**
 * Indexes the given documents in both the control client and the specified client,
 * asserting that the two responses are equivalent.
 *
 * @param client the non-control client to index into
 * @param params request parameters applied to both update requests
 * @param sdocs  the documents to index
 * @return the response from the specified (non-control) client
 */
protected UpdateResponse indexDoc(SolrClient client, SolrParams params, SolrInputDocument... sdocs) throws IOException, SolrServerException {
    final UpdateResponse fromControl = add(controlClient, params, sdocs);
    final UpdateResponse fromClient = add(client, params, sdocs);
    compareSolrResponses(fromClient, fromControl);
    return fromClient;
}
Example usage of org.apache.solr.client.solrj.response.UpdateResponse in the Apache lucene-solr project, taken from the class TestTolerantUpdateProcessorCloud, method testVariousAdds.
/**
 * Exercises the tolerant update processor against many combinations of batched adds
 * that partially fail: failures on a single shard, failures spread across shards,
 * docs missing the uniqueKey, batches that exceed the configured maxErrors (expected
 * to throw a top-level 400 whose Exception metadata identifies each failed doc), and
 * batches that succeed despite many failures when maxErrors=-1.
 */
protected static void testVariousAdds(SolrClient client) throws Exception {
assertNotNull("client not initialized", client);
UpdateResponse rsp = null;
// 2 docs that are both on shard1, the first one should fail
for (int maxErrors : new int[] { -1, 2, 47, 10 }) {
// regardless of which of these maxErrors values we use, behavior should be the same...
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "" + maxErrors, "commit", "true"), doc(f("id", S_ONE_PRE + "42"), f("foo_i", "bogus_value")), doc(f("id", S_ONE_PRE + "666"), f("foo_i", "1976"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("single shard, 1st doc should fail", rsp, S_ONE_PRE + "42");
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, S_ONE_PRE + "42");
assertQueryDocIds(client, true, S_ONE_PRE + "666");
// ...only diff should be that we get an accurate report of the effective maxErrors
assertEquals(maxErrors, rsp.getResponseHeader().get("maxErrors"));
}
// 2 docs that are both on shard1, the second one should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-not-set", "commit", "true"), doc(f("id", S_ONE_PRE + "55"), f("foo_i", "1976")), doc(f("id", S_ONE_PRE + "77"), f("foo_i", "bogus_val"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("single shard, 2nd doc should fail", rsp, S_ONE_PRE + "77");
assertQueryDocIds(client, false, S_ONE_PRE + "77");
assertQueryDocIds(client, true, S_ONE_PRE + "666", S_ONE_PRE + "55");
// since maxErrors is unset, we should get an "unlimited" value back
assertEquals(-1, rsp.getResponseHeader().get("maxErrors"));
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// 2 docs on 2 diff shards, first of which should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "42"), f("foo_i", "bogus_value")), doc(f("id", S_TWO_PRE + "666"), f("foo_i", "1976"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("two shards, 1st doc should fail", rsp, S_ONE_PRE + "42");
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, S_ONE_PRE + "42");
assertQueryDocIds(client, true, S_TWO_PRE + "666");
// 2 docs on 2 diff shards, second of which should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "55"), f("foo_i", "1976")), doc(f("id", S_TWO_PRE + "77"), f("foo_i", "bogus_val"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("two shards, 2nd doc should fail", rsp, S_TWO_PRE + "77");
assertQueryDocIds(client, false, S_TWO_PRE + "77");
assertQueryDocIds(client, true, S_TWO_PRE + "666", S_ONE_PRE + "55");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, 1 from each shard should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23")), doc(f("id", S_ONE_PRE + "14")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16")), doc(f("id", S_TWO_PRE + "26"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("many docs, 1 from each shard should fail", rsp, S_ONE_PRE + "15", S_TWO_PRE + "22");
assertQueryDocIds(client, false, S_TWO_PRE + "22", S_ONE_PRE + "15");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_TWO_PRE + "21", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "23", S_ONE_PRE + "14", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "16", S_TWO_PRE + "26");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, 1 from each shard should fail and 1 w/o uniqueKey
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23")), // no "id"
doc(f("foo_i", "42")), doc(f("id", S_ONE_PRE + "14")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16")), doc(f("id", S_TWO_PRE + "26"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("many docs, 1 from each shard (+ no id) should fail", rsp, S_ONE_PRE + "15", "(unknown)", S_TWO_PRE + "22");
assertQueryDocIds(client, false, S_TWO_PRE + "22", S_ONE_PRE + "15");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_TWO_PRE + "21", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "23", S_ONE_PRE + "14", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "16", S_TWO_PRE + "26");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// more then 10 docs failing should trip the default maxErrors and throw top-level
try {
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "14"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "26"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "17")), doc(f("id", S_TWO_PRE + "27")), doc(f("id", S_ONE_PRE + "18"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "28"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "19"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "29"), f("foo_i", "bogus_val")), // may be skipped, more then 10 fails
doc(f("id", S_ONE_PRE + "10")), // may be skipped, more then 10 fails
doc(f("id", S_TWO_PRE + "20"))).process(client);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
actualKnownErrs.add(err);
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
for (ToleratedUpdateError err : actualKnownErrs) {
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed err msg didn't match expected value: " + err, err.getMessage().contains("bogus_val"));
}
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, // explicitly failed
S_TWO_PRE + "21", S_TWO_PRE + "22", S_TWO_PRE + "23", S_ONE_PRE + "14", S_ONE_PRE + "15", S_ONE_PRE + "16", S_TWO_PRE + "26", S_ONE_PRE + "18", S_TWO_PRE + "28", S_ONE_PRE + "19", S_TWO_PRE + "29");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "17", S_TWO_PRE + "27");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// same as above, but all 11 failures are on a single shard (two)
try {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
docs.add(doc(f("id", S_ONE_PRE + i)));
docs.add(doc(f("id", S_TWO_PRE + i), f("foo_i", "bogus_val")));
}
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_ONE_PRE + "x")));
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
actualKnownErrs.add(err);
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
for (ToleratedUpdateError err : actualKnownErrs) {
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed id had unexpected prefix: " + err, err.getId().startsWith(S_TWO_PRE));
assertTrue("failed err msg didn't match expected value: " + err, err.getMessage().contains("bogus_val"));
}
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y", //
S_ONE_PRE + "0", S_ONE_PRE + "1", S_ONE_PRE + "2", S_ONE_PRE + "3", S_ONE_PRE + "4", S_ONE_PRE + "5", S_ONE_PRE + "6", S_ONE_PRE + "7", S_ONE_PRE + "8", S_ONE_PRE + "9");
assertQueryDocIds(client, false, // explicitly failed
S_TWO_PRE + "0", S_TWO_PRE + "1", S_TWO_PRE + "2", S_TWO_PRE + "3", S_TWO_PRE + "4", S_TWO_PRE + "5", S_TWO_PRE + "6", S_TWO_PRE + "7", S_TWO_PRE + "8", S_TWO_PRE + "9");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// more then 10 docs missing the uniqueKey should also trip maxErrors
try {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
// no "id" field
docs.add(doc(f("foo_i", "" + i)));
}
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_ONE_PRE + "x")));
// may be skipped, more then 10 fails
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
fail("did not get a top level exception when more then 10 docs mising uniqueKey: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed id didn't match 'unknown': " + err, err.getId().contains("unknown"));
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, more then 10 from a single shard (two) should fail but
// request should still succeed because of maxErrors=-1 param
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
ArrayList<ExpectedErr> expectedErrs = new ArrayList<ExpectedErr>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
docs.add(doc(f("id", S_ONE_PRE + i)));
docs.add(doc(f("id", S_TWO_PRE + i), f("foo_i", "bogus_val")));
expectedErrs.add(addErr(S_TWO_PRE + i));
}
docs.add(doc(f("id", S_ONE_PRE + "x")));
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "-1", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
assertUpdateTolerantErrors("many docs from shard2 fail, but req should succeed", rsp, expectedErrs.toArray(new ExpectedErr[expectedErrs.size()]));
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y", // later
S_ONE_PRE + "x", // later
S_TWO_PRE + "x");
}
Example usage of org.apache.solr.client.solrj.response.UpdateResponse in the Apache lucene-solr project, taken from the class TestStressInPlaceUpdates, method addDocAndGetVersion.
/**
 * Builds a document from the given field name/value pairs, sends it to the leader
 * (with {@code versions=true} so the assigned version is returned), and returns
 * that version after asserting it is positive.
 *
 * @param fields alternating field name/value pairs passed to {@code addFields}
 * @return the version Solr assigned to the added document
 */
@SuppressWarnings("rawtypes")
protected long addDocAndGetVersion(Object... fields) throws Exception {
    final SolrInputDocument sdoc = new SolrInputDocument();
    addFields(sdoc, fields);

    final ModifiableSolrParams reqParams = new ModifiableSolrParams();
    reqParams.add("versions", "true");

    final UpdateRequest req = new UpdateRequest();
    req.setParams(reqParams);
    req.add(sdoc);

    // send updates to leader, to avoid SOLR-8733
    final UpdateResponse rsp = req.process(leaderClient);

    final NamedList adds = (NamedList) rsp.getResponse().get("adds");
    final long returnedVersion = Long.parseLong(adds.getVal(0).toString());
    assertTrue("Due to SOLR-8733, sometimes returned version is 0. Let us assert that we have successfully worked around that problem here.", returnedVersion > 0);
    return returnedVersion;
}
Aggregations