Use of org.apache.solr.common.ToleratedUpdateError in the lucene-solr project (apache).
From class TestToleratedUpdateError, method compare:
/**
 * Round-trips {@code in} through the supplied copier and asserts that the
 * resulting copy is non-null and equivalent to the original (equivalence is
 * checked by the two-arg {@code compare} overload).
 */
public void compare(ToleratedUpdateError in, Coppier coppier) {
  final ToleratedUpdateError copied = coppier.copy(in);
  assertNotNull(copied);
  compare(in, copied);
}
Use of org.apache.solr.common.ToleratedUpdateError in the lucene-solr project (apache).
From class TestTolerantUpdateProcessorCloud, method testVariousAdds:
/**
 * Exercises the "tolerant" update chain with a variety of add commands, verifying
 * that individual document failures are reported per-document (via the "errors"
 * and "maxErrors" response header entries) without failing the whole request --
 * until the number of failures exceeds the effective maxErrors, at which point a
 * top level SolrException (with per-update metadata) is expected.
 *
 * NOTE(review): assumes S_ONE_PRE / S_TWO_PRE are id prefixes routed to shard1 /
 * shard2 respectively -- confirm against the test class setup.
 *
 * @param client the client to send updates and queries through
 * @throws Exception on any unexpected client/server error
 */
protected static void testVariousAdds(SolrClient client) throws Exception {
assertNotNull("client not initialized", client);
UpdateResponse rsp = null;
// 2 docs that are both on shard1, the first one should fail
for (int maxErrors : new int[] { -1, 2, 47, 10 }) {
// regardless of which of these maxErrors values we use, behavior should be the same...
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "" + maxErrors, "commit", "true"), doc(f("id", S_ONE_PRE + "42"), f("foo_i", "bogus_value")), doc(f("id", S_ONE_PRE + "666"), f("foo_i", "1976"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("single shard, 1st doc should fail", rsp, S_ONE_PRE + "42");
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, S_ONE_PRE + "42");
assertQueryDocIds(client, true, S_ONE_PRE + "666");
// ...only diff should be that we get an accurate report of the effective maxErrors
assertEquals(maxErrors, rsp.getResponseHeader().get("maxErrors"));
}
// 2 docs that are both on shard1, the second one should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-not-set", "commit", "true"), doc(f("id", S_ONE_PRE + "55"), f("foo_i", "1976")), doc(f("id", S_ONE_PRE + "77"), f("foo_i", "bogus_val"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("single shard, 2nd doc should fail", rsp, S_ONE_PRE + "77");
assertQueryDocIds(client, false, S_ONE_PRE + "77");
assertQueryDocIds(client, true, S_ONE_PRE + "666", S_ONE_PRE + "55");
// since maxErrors is unset, we should get an "unlimited" value back
assertEquals(-1, rsp.getResponseHeader().get("maxErrors"));
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// 2 docs on 2 diff shards, first of which should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "42"), f("foo_i", "bogus_value")), doc(f("id", S_TWO_PRE + "666"), f("foo_i", "1976"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("two shards, 1st doc should fail", rsp, S_ONE_PRE + "42");
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, S_ONE_PRE + "42");
assertQueryDocIds(client, true, S_TWO_PRE + "666");
// 2 docs on 2 diff shards, second of which should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "55"), f("foo_i", "1976")), doc(f("id", S_TWO_PRE + "77"), f("foo_i", "bogus_val"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("two shards, 2nd doc should fail", rsp, S_TWO_PRE + "77");
assertQueryDocIds(client, false, S_TWO_PRE + "77");
assertQueryDocIds(client, true, S_TWO_PRE + "666", S_ONE_PRE + "55");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, 1 from each shard should fail
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23")), doc(f("id", S_ONE_PRE + "14")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16")), doc(f("id", S_TWO_PRE + "26"))).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantAddErrors("many docs, 1 from each shard should fail", rsp, S_ONE_PRE + "15", S_TWO_PRE + "22");
assertQueryDocIds(client, false, S_TWO_PRE + "22", S_ONE_PRE + "15");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_TWO_PRE + "21", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "23", S_ONE_PRE + "14", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "16", S_TWO_PRE + "26");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, 1 from each shard should fail and 1 w/o uniqueKey
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23")), // no "id"
doc(f("foo_i", "42")), doc(f("id", S_ONE_PRE + "14")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16")), doc(f("id", S_TWO_PRE + "26"))).process(client);
assertEquals(0, rsp.getStatus());
// the doc with no uniqueKey is reported with the placeholder id "(unknown)"
assertUpdateTolerantAddErrors("many docs, 1 from each shard (+ no id) should fail", rsp, S_ONE_PRE + "15", "(unknown)", S_TWO_PRE + "22");
assertQueryDocIds(client, false, S_TWO_PRE + "22", S_ONE_PRE + "15");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_TWO_PRE + "21", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "23", S_ONE_PRE + "14", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "16", S_TWO_PRE + "26");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// more than maxErrors (10) adds fail, spread across both shards:
// the request itself should fail with a top level exception
try {
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", S_ONE_PRE + "11")), doc(f("id", S_TWO_PRE + "21"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "12")), doc(f("id", S_TWO_PRE + "22"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "13")), doc(f("id", S_TWO_PRE + "23"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "14"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "24")), doc(f("id", S_ONE_PRE + "15"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "25")), doc(f("id", S_ONE_PRE + "16"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "26"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "17")), doc(f("id", S_TWO_PRE + "27")), doc(f("id", S_ONE_PRE + "18"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "28"), f("foo_i", "bogus_val")), doc(f("id", S_ONE_PRE + "19"), f("foo_i", "bogus_val")), doc(f("id", S_TWO_PRE + "29"), f("foo_i", "bogus_val")), // may be skipped, more than 10 failures
doc(f("id", S_ONE_PRE + "10")), // may be skipped, more than 10 failures
doc(f("id", S_TWO_PRE + "20"))).process(client);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
int actualKnownErrsCount = 0;
// scan the metadata for entries this processor understands, skipping the rest
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
actualKnownErrs.add(err);
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
for (ToleratedUpdateError err : actualKnownErrs) {
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed err msg didn't match expected value: " + err, err.getMessage().contains("bogus_val"));
}
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, false, // explicitly failed
S_TWO_PRE + "21", S_TWO_PRE + "22", S_TWO_PRE + "23", S_ONE_PRE + "14", S_ONE_PRE + "15", S_ONE_PRE + "16", S_TWO_PRE + "26", S_ONE_PRE + "18", S_TWO_PRE + "28", S_ONE_PRE + "19", S_TWO_PRE + "29");
assertQueryDocIds(client, true, S_ONE_PRE + "11", S_ONE_PRE + "12", S_ONE_PRE + "13", S_TWO_PRE + "24", S_TWO_PRE + "25", S_ONE_PRE + "17", S_TWO_PRE + "27");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// 11 failures all on shard2: top level exception expected, and every reported
// failure should carry the shard2 id prefix
try {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
docs.add(doc(f("id", S_ONE_PRE + i)));
docs.add(doc(f("id", S_TWO_PRE + i), f("foo_i", "bogus_val")));
}
// may be skipped, more than 10 failures
docs.add(doc(f("id", S_ONE_PRE + "x")));
// may be skipped, more than 10 failures
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
fail("did not get a top level exception when more then 10 docs failed: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
actualKnownErrs.add(err);
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
for (ToleratedUpdateError err : actualKnownErrs) {
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed id had unexpected prefix: " + err, err.getId().startsWith(S_TWO_PRE));
assertTrue("failed err msg didn't match expected value: " + err, err.getMessage().contains("bogus_val"));
}
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y", //
S_ONE_PRE + "0", S_ONE_PRE + "1", S_ONE_PRE + "2", S_ONE_PRE + "3", S_ONE_PRE + "4", S_ONE_PRE + "5", S_ONE_PRE + "6", S_ONE_PRE + "7", S_ONE_PRE + "8", S_ONE_PRE + "9");
assertQueryDocIds(client, false, // explicitly failed
S_TWO_PRE + "0", S_TWO_PRE + "1", S_TWO_PRE + "2", S_TWO_PRE + "3", S_TWO_PRE + "4", S_TWO_PRE + "5", S_TWO_PRE + "6", S_TWO_PRE + "7", S_TWO_PRE + "8", S_TWO_PRE + "9");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// more than 10 docs missing the uniqueKey field: top level exception expected,
// and every reported failure id should be the "(unknown)" placeholder
try {
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
// no "id" field
docs.add(doc(f("foo_i", "" + i)));
}
// may be skipped, more than 10 failures
docs.add(doc(f("id", S_ONE_PRE + "x")));
// may be skipped, more than 10 failures
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
fail("did not get a top level exception when more then 10 docs mising uniqueKey: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
assertEquals("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400, e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
int actualKnownErrsCount = 0;
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
assertEquals("only expected type of error is ADD: " + err, CmdType.ADD, err.getType());
assertTrue("failed id didn't match 'unknown': " + err, err.getId().contains("unknown"));
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 11, actualKnownErrsCount);
}
// need to force since update didn't finish
assertEquals(0, client.commit().getStatus());
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y");
// clean slate
assertEquals(0, client.deleteByQuery("*:*").getStatus());
// many docs from diff shards, more than 10 from a single shard (two) should fail but
// request should still succeed because of maxErrors=-1 param
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(30);
ArrayList<ExpectedErr> expectedErrs = new ArrayList<ExpectedErr>(30);
docs.add(doc(f("id", S_ONE_PRE + "z")));
docs.add(doc(f("id", S_TWO_PRE + "z")));
docs.add(doc(f("id", S_ONE_PRE + "y")));
docs.add(doc(f("id", S_TWO_PRE + "y")));
for (int i = 0; i < 11; i++) {
docs.add(doc(f("id", S_ONE_PRE + i)));
docs.add(doc(f("id", S_TWO_PRE + i), f("foo_i", "bogus_val")));
expectedErrs.add(addErr(S_TWO_PRE + i));
}
docs.add(doc(f("id", S_ONE_PRE + "x")));
docs.add(doc(f("id", S_TWO_PRE + "x")));
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "-1", "commit", "true"), docs.toArray(new SolrInputDocument[docs.size()])).process(client);
assertUpdateTolerantErrors("many docs from shard2 fail, but req should succeed", rsp, expectedErrs.toArray(new ExpectedErr[expectedErrs.size()]));
assertQueryDocIds(client, true, // first
S_ONE_PRE + "z", // first
S_ONE_PRE + "y", // first
S_TWO_PRE + "z", // first
S_TWO_PRE + "y", // later
S_ONE_PRE + "x", // later
S_TWO_PRE + "x");
}
Use of org.apache.solr.common.ToleratedUpdateError in the lucene-solr project (apache).
From class TolerantUpdateProcessor, method processAdd:
/**
 * Tolerant variant of add processing: attempts the add and, if anything is
 * thrown, records it as a {@code ToleratedUpdateError} (tracking the first
 * error seen via {@code firstErrTracker}) instead of propagating immediately.
 * Only once the number of known errors exceeds {@code maxErrors} is the first
 * tracked error rethrown.
 */
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
  BytesRef indexedId = null;
  try {
    // force AddUpdateCommand to validate+cache the id before proceeding
    indexedId = cmd.getIndexedId();
    super.processAdd(cmd);
  } catch (Throwable thrown) {
    firstErrTracker.caught(thrown);
    ToleratedUpdateError err = new ToleratedUpdateError(CmdType.ADD, getPrintableId(indexedId), thrown.getMessage());
    knownErrors.add(err);
    if (maxErrors < knownErrors.size()) {
      firstErrTracker.throwFirst();
    }
  }
}
Use of org.apache.solr.common.ToleratedUpdateError in the lucene-solr project (apache).
From class TestTolerantUpdateProcessorCloud, method testAddsMixedWithDeletes:
/**
 * Exercises the "tolerant" update chain with requests that mix adds, deletes by
 * id (with stale versions, forcing version conflicts), and deletes by query
 * (some malformed) -- verifying that all three error kinds are tolerated and
 * reported per-update, and that exceeding maxErrors produces a top level
 * SolrException whose metadata identifies every failed update.
 *
 * NOTE(review): assumes S_ONE_PRE / S_TWO_PRE are id prefixes routed to shard1 /
 * shard2 respectively -- confirm against the test class setup.
 *
 * @param client the client to send updates and queries through
 * @throws Exception on any unexpected client/server error
 */
protected static void testAddsMixedWithDeletes(SolrClient client) throws Exception {
assertNotNull("client not initialized", client);
// 3 doc ids, exactly one on shard1
final String docId1 = S_ONE_PRE + "42";
final String docId21 = S_TWO_PRE + "42";
final String docId22 = S_TWO_PRE + "666";
UpdateResponse rsp = null;
// add 2 docs, one to each shard
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", docId1), f("foo_i", "2001")), doc(f("id", docId21), f("foo_i", "1976"))).process(client);
assertEquals(0, rsp.getStatus());
// add failure on shard2, delete failure on shard1
// (deleteById with version -1L forces a version conflict on the existing doc)
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", docId22), f("foo_i", "not_a_num"))).deleteById(docId1, -1L).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantErrors("shard2 add fail, shard1 delI fail", rsp, delIErr(docId1, "version conflict"), addErr(docId22, "not_a_num"));
// attempt a request containing 4 errors of various types (add, delI, delQ)
for (String maxErrors : new String[] { "4", "-1", "100" }) {
// for all of these maxErrors values, the overall request should still succeed
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", maxErrors, "commit", "true"), doc(f("id", docId22), f("foo_i", "bogus_val"))).deleteById(docId1, -1L).deleteByQuery("malformed:[").deleteById(docId21, -1L).process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantErrors("failed variety of updates", rsp, delIErr(docId1, "version conflict"), delQErr("malformed:[", "SyntaxError"), delIErr(docId21, "version conflict"), addErr(docId22, "bogus_val"));
}
// attempt a request containing 4 errors of various types (add, delI, delQ) .. 1 too many
try {
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "3", "commit", "true"), doc(f("id", docId22), f("foo_i", "bogus_val"))).deleteById(docId1, -1L).deleteByQuery("malformed:[").deleteById(docId21, -1L).process(client);
fail("did not get a top level exception when more then 4 updates failed: " + rsp.toString());
} catch (SolrException e) {
// we can't make any reliable assertions about the error message, because
// it varies based on how the request was routed -- see SOLR-8830
// likewise, we can't make a firm(er) assertion about the response code...
assertTrue("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
400 == e.code() || 409 == e.code());
// verify that the Exceptions metadata can tell us what failed.
NamedList<String> remoteErrMetadata = e.getMetadata();
assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
int actualKnownErrsCount = 0;
// scan the metadata for entries this processor understands, skipping the rest
for (int i = 0; i < remoteErrMetadata.size(); i++) {
ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
if (null == err) {
// some metadata unrelated to this update processor
continue;
}
actualKnownErrsCount++;
actualKnownErrs.add(err);
}
assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 4, actualKnownErrsCount);
assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
}
// sanity check our 2 existing docs are still here
assertQueryDocIds(client, true, docId1, docId21);
assertQueryDocIds(client, false, docId22);
// tolerate some failures along with a DELQ that should succeed
rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", docId22), f("foo_i", "not_a_num"))).deleteById(docId1, -1L).deleteByQuery("zot_i:[42 to gibberish...").deleteByQuery("foo_i:[50 TO 2000}").process(client);
assertEquals(0, rsp.getStatus());
assertUpdateTolerantErrors("mix fails with one valid DELQ", rsp, delIErr(docId1, "version conflict"), delQErr("zot_i:[42 to gibberish..."), addErr(docId22, "not_a_num"));
// one of our previous docs should have been deleted now
assertQueryDocIds(client, true, docId1);
assertQueryDocIds(client, false, docId21, docId22);
}
Use of org.apache.solr.common.ToleratedUpdateError in the lucene-solr project (apache).
From class CloudSolrClient, method condenseResponse:
/**
 * Condenses the per-shard responses of a routed update request into a single
 * client-facing {@link RouteResponse}: the last non-zero status wins, the
 * minimum reported replication factor is kept, per-type version lists
 * ("adds", "deletes", "deleteByQuery") are concatenated, and any
 * tolerated-update errors reported by the shards are merged.  If the merged
 * error count exceeds the effective maxErrors (the minimum reported by any
 * shard), a BAD_REQUEST SolrException is thrown whose metadata identifies each
 * failed update -- mirroring what TolerantUpdateProcessor does on a single node.
 *
 * @param response   the per-shard responses, one NamedList entry per shard
 * @param timeMillis wall-clock time of the overall request, reported as QTime
 * @return the condensed response
 * @throws SolrException if cumulative tolerated errors exceed the effective maxErrors
 */
public RouteResponse condenseResponse(NamedList response, int timeMillis) {
  RouteResponse condensed = new RouteResponse();
  int status = 0;
  Integer rf = null;
  Integer minRf = null;
  // errors tolerated (and reported via the responseHeader) by TolerantUpdateProcessor
  List<SimpleOrderedMap<String>> toleratedErrors = null;
  int maxToleratedErrors = Integer.MAX_VALUE;
  // For "adds", "deletes", "deleteByQuery" etc.
  Map<String, NamedList> versions = new HashMap<>();
  for (int i = 0; i < response.size(); i++) {
    NamedList shardResponse = (NamedList) response.getVal(i);
    NamedList header = (NamedList) shardResponse.get("responseHeader");
    Integer shardStatus = (Integer) header.get("status");
    int s = shardStatus.intValue();
    if (s > 0) {
      status = s;
    }
    Object rfObj = header.get(UpdateRequest.REPFACT);
    // instanceof is already false for null, so no separate null check is needed
    if (rfObj instanceof Integer) {
      Integer routeRf = (Integer) rfObj;
      // keep the smallest replication factor achieved by any shard
      if (rf == null || routeRf < rf)
        rf = routeRf;
    }
    // NOTE(review): this retains only the LAST shard's minRf -- presumably all
    // shards echo the same requested value; confirm before relying on it.
    minRf = (Integer) header.get(UpdateRequest.MIN_REPFACT);
    List<SimpleOrderedMap<String>> shardTolerantErrors = (List<SimpleOrderedMap<String>>) header.get("errors");
    if (null != shardTolerantErrors) {
      Integer shardMaxToleratedErrors = (Integer) header.get("maxErrors");
      assert null != shardMaxToleratedErrors : "TolerantUpdateProcessor reported errors but not maxErrors";
      // if we get into some weird state where the nodes disagree about the effective maxErrors,
      // assume the min value seen to decide if we should fail.
      maxToleratedErrors = Math.min(maxToleratedErrors, ToleratedUpdateError.getEffectiveMaxErrors(shardMaxToleratedErrors.intValue()));
      if (null == toleratedErrors) {
        toleratedErrors = new ArrayList<SimpleOrderedMap<String>>(shardTolerantErrors.size());
      }
      toleratedErrors.addAll(shardTolerantErrors);
    }
    for (String updateType : Arrays.asList("adds", "deletes", "deleteByQuery")) {
      Object obj = shardResponse.get(updateType);
      if (obj instanceof NamedList) {
        // concatenate this shard's version list onto any accumulated so far
        versions.computeIfAbsent(updateType, t -> new NamedList()).addAll((NamedList) obj);
      }
    }
  }
  NamedList cheader = new NamedList();
  cheader.add("status", status);
  cheader.add("QTime", timeMillis);
  if (rf != null)
    cheader.add(UpdateRequest.REPFACT, rf);
  if (minRf != null)
    cheader.add(UpdateRequest.MIN_REPFACT, minRf);
  if (null != toleratedErrors) {
    cheader.add("maxErrors", ToleratedUpdateError.getUserFriendlyMaxErrors(maxToleratedErrors));
    cheader.add("errors", toleratedErrors);
    if (maxToleratedErrors < toleratedErrors.size()) {
      // cumulative errors are too high, we need to throw a client exception w/correct metadata
      // NOTE: it shouldn't be possible for 1 == toleratedErrors.size(), because if that were the case
      // then at least one shard should have thrown a real error before this, so we don't worry
      // about having a more "singular" exception msg for that situation
      StringBuilder msgBuf = new StringBuilder().append(toleratedErrors.size()).append(" Async failures during distributed update: ");
      NamedList metadata = new NamedList<String>();
      for (SimpleOrderedMap<String> err : toleratedErrors) {
        ToleratedUpdateError te = ToleratedUpdateError.parseMap(err);
        metadata.add(te.getMetadataKey(), te.getMetadataValue());
        msgBuf.append("\n").append(te.getMessage());
      }
      SolrException toThrow = new SolrException(ErrorCode.BAD_REQUEST, msgBuf.toString());
      toThrow.setMetadata(metadata);
      throw toThrow;
    }
  }
  for (Map.Entry<String, NamedList> entry : versions.entrySet()) {
    condensed.add(entry.getKey(), entry.getValue());
  }
  condensed.add("responseHeader", cheader);
  return condensed;
}
Aggregations