use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.
the class TestTolerantUpdateProcessorCloud method testAddsMixedWithDeletes.
protected static void testAddsMixedWithDeletes(SolrClient client) throws Exception {
  assertNotNull("client not initialized", client);
  // 3 doc ids: exactly one on shard1, the other two on shard2
  final String docId1 = S_ONE_PRE + "42";
  final String docId21 = S_TWO_PRE + "42";
  final String docId22 = S_TWO_PRE + "666";
  UpdateResponse rsp = null;
  // add 2 docs, one to each shard
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"),
               doc(f("id", docId1), f("foo_i", "2001")),
               doc(f("id", docId21), f("foo_i", "1976"))).process(client);
  assertEquals(0, rsp.getStatus());
  // add failure on shard2, delete failure on shard1
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"),
               doc(f("id", docId22), f("foo_i", "not_a_num")))
      .deleteById(docId1, -1L)
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("shard2 add fail, shard1 delI fail", rsp,
                             delIErr(docId1, "version conflict"),
                             addErr(docId22, "not_a_num"));
  // attempt requests containing 4 errors of various types (add, delI, delQ)
  for (String maxErrors : new String[] { "4", "-1", "100" }) {
    // for all of these maxErrors values, the overall request should still succeed
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", maxErrors, "commit", "true"),
                 doc(f("id", docId22), f("foo_i", "bogus_val")))
        .deleteById(docId1, -1L)
        .deleteByQuery("malformed:[")
        .deleteById(docId21, -1L)
        .process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed variety of updates", rsp,
                               delIErr(docId1, "version conflict"),
                               delQErr("malformed:[", "SyntaxError"),
                               delIErr(docId21, "version conflict"),
                               addErr(docId22, "bogus_val"));
  }
  // attempt a request containing 4 errors of various types (add, delI, delQ) .. 1 too many
  try {
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "3", "commit", "true"),
                 doc(f("id", docId22), f("foo_i", "bogus_val")))
        .deleteById(docId1, -1L)
        .deleteByQuery("malformed:[")
        .deleteById(docId21, -1L)
        .process(client);
    fail("did not get a top level exception when more updates failed than maxErrors allowed: " + rsp.toString());
  } catch (SolrException e) {
    // we can't make any reliable assertions about the error message, because
    // it varies based on how the request was routed -- see SOLR-8830
    // likewise, we can't make a firm(er) assertion about the response code...
    // on a single node setup, a 5xx type error isn't something we should have triggered
    assertTrue("not the type of error we were expecting (" + e.code() + "): " + e.toString(),
               400 == e.code() || 409 == e.code());
    // verify that the Exception's metadata can tell us what failed.
    NamedList<String> remoteErrMetadata = e.getMetadata();
    assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
    Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
    int actualKnownErrsCount = 0;
    for (int i = 0; i < remoteErrMetadata.size(); i++) {
      ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i),
                                                                                          remoteErrMetadata.getVal(i));
      if (null == err) {
        // some metadata unrelated to this update processor
        continue;
      }
      actualKnownErrsCount++;
      actualKnownErrs.add(err);
    }
    assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 4, actualKnownErrsCount);
    assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
  }
  // sanity check our 2 existing docs are still here
  assertQueryDocIds(client, true, docId1, docId21);
  assertQueryDocIds(client, false, docId22);
  // tolerate some failures along with a DELQ that should succeed
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"),
               doc(f("id", docId22), f("foo_i", "not_a_num")))
      .deleteById(docId1, -1L)
      .deleteByQuery("zot_i:[42 to gibberish...")
      .deleteByQuery("foo_i:[50 TO 2000}")
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("mix fails with one valid DELQ", rsp,
                             delIErr(docId1, "version conflict"),
                             delQErr("zot_i:[42 to gibberish..."),
                             addErr(docId22, "not_a_num"));
  // one of our previous docs should have been deleted now
  assertQueryDocIds(client, true, docId1);
  assertQueryDocIds(client, false, docId21, docId22);
}
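The update(...), doc(...), and f(...) calls above are test helpers. As a minimal sketch (the chain name matches the test; the route-prefixed ids and the collection setup are assumptions), the equivalent raw SolrJ request that mixes an add with a version-checked delete in a single tolerant update looks roughly like this:
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;

static UpdateResponse tolerantMixedUpdate(SolrClient client) throws Exception {
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "a!42");       // hypothetical route-prefixed id
  doc.addField("foo_i", "2001");
  UpdateRequest req = new UpdateRequest();
  req.setParam("update.chain", "tolerant-chain-max-errors-10");
  req.setParam("commit", "true");
  req.add(doc);
  req.deleteById("b!666", -1L);     // _version_=-1: fails (but is tolerated) if the doc exists
  // with the tolerant chain, per-document failures are reported in the
  // response body while the top-level status stays 0
  return req.process(client);
}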
use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.
the class TestTolerantUpdateProcessorCloud method testVariousDeletes.
protected static void testVariousDeletes(SolrClient client) throws Exception {
  assertNotNull("client not initialized", client);
  // 2 docs, one on each shard
  final String docId1 = S_ONE_PRE + "42";
  final String docId2 = S_TWO_PRE + "666";
  UpdateResponse rsp = null;
  // add 1 doc to each shard
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"),
               doc(f("id", docId1), f("foo_i", "2001")),
               doc(f("id", docId2), f("foo_i", "1976"))).process(client);
  assertEquals(0, rsp.getStatus());
  // attempt to delete individual doc ids that should fail because of optimistic concurrency constraints
  for (String id : new String[] { docId1, docId2 }) {
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
        .deleteById(id, -1L)
        .process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed optimistic concurrent delId=" + id, rsp, delIErr(id));
  }
  // multiple failed deletes from the same shard (via optimistic concurrency constraints on bogus ids)
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
      .deleteById(S_ONE_PRE + "X", +1L)
      .deleteById(S_ONE_PRE + "Y", +1L)
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("failed optimistic concurrent delete by id for 2 bogus docs", rsp,
                             delIErr(S_ONE_PRE + "X"), delIErr(S_ONE_PRE + "Y"));
  assertQueryDocIds(client, true, docId1, docId2);
  // multiple failed deletes from different shards due to optimistic concurrency constraints
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
      .deleteById(docId2, -1L)
      .deleteById(docId1, -1L)
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("failed optimistic concurrent delete by id for 2 docs", rsp,
                             delIErr(docId1), delIErr(docId2));
  assertQueryDocIds(client, true, docId1, docId2);
  // deleteByQuery against an undefined field (fail)
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
      .deleteByQuery("bogus_field:foo")
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("failed delete by query", rsp, delQErr("bogus_field:foo"));
  assertQueryDocIds(client, true, docId1, docId2);
  // mix 2 deleteByQuery: one against an undefined field (fail), one that doesn't match anything (ok)
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
      .deleteByQuery("bogus_field:foo")
      .deleteByQuery("foo_i:23")
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("failed delete by query", rsp, delQErr("bogus_field:foo"));
  assertQueryDocIds(client, true, docId1, docId2);
  // mix 2 deleteById using _version_=-1: one for real doc1 (fail), one for bogus id (ok)
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
      .deleteById(docId1, -1L)
      .deleteById("bogus", -1L)
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("failed optimistic concurrent delete by id: exists", rsp, delIErr(docId1));
  assertQueryDocIds(client, true, docId1, docId2);
  // mix 2 deleteById using _version_=1: one for real doc1 (ok, deleted), one for bogus id (fail)
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
      .deleteById(docId1, +1L)
      .deleteById("bogusId", +1L)
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("failed optimistic concurrent delete by id: bogus", rsp, delIErr("bogusId"));
  assertQueryDocIds(client, false, docId1);
  assertQueryDocIds(client, true, docId2);
  // mix 2 deleteByQuery: one against an undefined field (fail), one that actually removes some docs (ok)
  assertQueryDocIds(client, true, docId2);
  rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"))
      .deleteByQuery("bogus_field:foo")
      .deleteByQuery("foo_i:1976")
      .process(client);
  assertEquals(0, rsp.getStatus());
  assertUpdateTolerantErrors("failed delete by query", rsp, delQErr("bogus_field:foo"));
  assertQueryDocIds(client, false, docId2);
}
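A short, self-contained sketch of the optimistic concurrency rules these deletes depend on (standard Solr _version_ semantics; the ids and chain name are carried over from the test, the method wrapper is hypothetical): a negative _version_ asserts the document must not exist, while _version_=1 asserts it must exist, so both deletes below fail and are tolerated.
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;

static UpdateResponse versionedDeletes(SolrClient client) throws Exception {
  UpdateRequest req = new UpdateRequest();
  req.setParam("update.chain", "tolerant-chain-max-errors-10");
  req.setParam("commit", "true");
  req.deleteById("existingId", -1L); // doc exists -> version conflict (tolerated)
  req.deleteById("missingId", +1L);  // doc missing -> version conflict (tolerated)
  return req.process(client);       // getStatus() is still 0; errors are in the body
}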
use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.
the class CloudSolrClientTest method testRouting.
@Test
public void testRouting() throws Exception {
  AbstractUpdateRequest request = new UpdateRequest()
      .add(id, "0", "a_t", "hello1")
      .add(id, "2", "a_t", "hello2")
      .setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
  // Test single-threaded routed updates for UpdateRequest
  NamedList<Object> response = getRandomClient().request(request, COLLECTION);
  if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
    checkSingleServer(response);
  }
  CloudSolrClient.RouteResponse rr = (CloudSolrClient.RouteResponse) response;
  Map<String, LBHttpSolrClient.Req> routes = rr.getRoutes();
  Iterator<Map.Entry<String, LBHttpSolrClient.Req>> it = routes.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
    String url = entry.getKey();
    UpdateRequest updateRequest = (UpdateRequest) entry.getValue().getRequest();
    SolrInputDocument doc = updateRequest.getDocuments().get(0);
    String id = doc.getField("id").getValue().toString();
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q", "id:" + id);
    params.add("distrib", "false");
    QueryRequest queryRequest = new QueryRequest(params);
    try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
      QueryResponse queryResponse = queryRequest.process(solrClient);
      SolrDocumentList docList = queryResponse.getResults();
      assertTrue(docList.getNumFound() == 1);
    }
  }
  // Test the deleteById routing for UpdateRequest
  final UpdateResponse uResponse = new UpdateRequest()
      .deleteById("0")
      .deleteById("2")
      .commit(cluster.getSolrClient(), COLLECTION);
  if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
    checkSingleServer(uResponse.getResponse());
  }
  QueryResponse qResponse = getRandomClient().query(COLLECTION, new SolrQuery("*:*"));
  SolrDocumentList docs = qResponse.getResults();
  assertEquals(0, docs.getNumFound());
  // Test multi-threaded routed updates for UpdateRequest
  try (CloudSolrClient threadedClient = getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
    threadedClient.setParallelUpdates(true);
    threadedClient.setDefaultCollection(COLLECTION);
    response = threadedClient.request(request);
    if (threadedClient.isDirectUpdatesToLeadersOnly()) {
      checkSingleServer(response);
    }
    rr = (CloudSolrClient.RouteResponse) response;
    routes = rr.getRoutes();
    it = routes.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
      String url = entry.getKey();
      UpdateRequest updateRequest = (UpdateRequest) entry.getValue().getRequest();
      SolrInputDocument doc = updateRequest.getDocuments().get(0);
      String id = doc.getField("id").getValue().toString();
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.add("q", "id:" + id);
      params.add("distrib", "false");
      QueryRequest queryRequest = new QueryRequest(params);
      try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
        QueryResponse queryResponse = queryRequest.process(solrClient);
        SolrDocumentList docList = queryResponse.getResults();
        assertTrue(docList.getNumFound() == 1);
      }
    }
  }
  // Test that queries with _route_ params are routed by the client
  // Track request counts on each node before the query calls
  ClusterState clusterState = cluster.getSolrClient().getZkStateReader().getClusterState();
  DocCollection col = clusterState.getCollection(COLLECTION);
  Map<String, Long> requestCountsMap = Maps.newHashMap();
  for (Slice slice : col.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      requestCountsMap.put(baseURL, getNumRequests(baseURL, COLLECTION));
    }
  }
  // Collect the base URLs of the replicas of the shard that's expected to be hit
  DocRouter router = col.getRouter();
  Collection<Slice> expectedSlices = router.getSearchSlicesSingle("0", null, col);
  Set<String> expectedBaseURLs = Sets.newHashSet();
  for (Slice expectedSlice : expectedSlices) {
    for (Replica replica : expectedSlice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      expectedBaseURLs.add(baseURL);
    }
  }
  assertTrue("expected urls is not fewer than all urls! expected=" + expectedBaseURLs + "; all=" + requestCountsMap.keySet(),
             expectedBaseURLs.size() < requestCountsMap.size());
  // Collect a number of shard keys that route to the same shard as "0"
  int n;
  if (TEST_NIGHTLY) {
    n = random().nextInt(999) + 2;
  } else {
    n = random().nextInt(9) + 2;
  }
  List<String> sameShardRoutes = Lists.newArrayList();
  sameShardRoutes.add("0");
  for (int i = 1; i < n; i++) {
    String shardKey = Integer.toString(i);
    Collection<Slice> slices = router.getSearchSlicesSingle(shardKey, null, col);
    log.info("Expected Slices {}", slices);
    if (expectedSlices.equals(slices)) {
      sameShardRoutes.add(shardKey);
    }
  }
  assertTrue(sameShardRoutes.size() > 1);
  // Do n queries with the _route_ parameter routing to the same shard
  for (int i = 0; i < n; i++) {
    ModifiableSolrParams solrParams = new ModifiableSolrParams();
    solrParams.set(CommonParams.Q, "*:*");
    solrParams.set(ShardParams._ROUTE_, sameShardRoutes.get(random().nextInt(sameShardRoutes.size())));
    log.info("output: {}", getRandomClient().query(COLLECTION, solrParams));
  }
  // The increase in request counts on the expected nodes should total n, while
  // there should be no increase on the unexpected nodes.
  int increaseFromExpectedUrls = 0;
  int increaseFromUnexpectedUrls = 0;
  Map<String, Long> numRequestsToUnexpectedUrls = Maps.newHashMap();
  for (Slice slice : col.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      Long prevNumRequests = requestCountsMap.get(baseURL);
      Long curNumRequests = getNumRequests(baseURL, COLLECTION);
      long delta = curNumRequests - prevNumRequests;
      if (expectedBaseURLs.contains(baseURL)) {
        increaseFromExpectedUrls += delta;
      } else {
        increaseFromUnexpectedUrls += delta;
        numRequestsToUnexpectedUrls.put(baseURL, delta);
      }
    }
  }
  assertEquals("Unexpected number of requests to expected URLs", n, increaseFromExpectedUrls);
  assertEquals("Unexpected number of requests to unexpected URLs: " + numRequestsToUnexpectedUrls, 0, increaseFromUnexpectedUrls);
}
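For comparison with the raw ModifiableSolrParams used above, here is a minimal sketch of a routed query via SolrQuery (the collection name is an assumption): when _route_ is set, CloudSolrClient contacts only replicas of the shard that owns that route key, which is exactly what the request-count accounting in the test verifies.
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.ShardParams;

static long routedQuery(CloudSolrClient client) throws Exception {
  SolrQuery q = new SolrQuery("*:*");
  q.set(ShardParams._ROUTE_, "0"); // same route key used when indexing doc id "0"
  QueryResponse rsp = client.query("myCollection", q); // hypothetical collection
  return rsp.getResults().getNumFound();
}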
use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.
the class CloudSolrClientTest method testParallelUpdateQTime.
@Test
public void testParallelUpdateQTime() throws Exception {
  UpdateRequest req = new UpdateRequest();
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", String.valueOf(TestUtil.nextInt(random(), 1000, 1100)));
    req.add(doc);
  }
  UpdateResponse response = req.process(getRandomClient(), COLLECTION);
  // See SOLR-6547; we just need to ensure that no exception is thrown here
  assertTrue(response.getQTime() >= 0);
}
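As a usage note, a minimal sketch of what the assertion exercises (the doc id is hypothetical; getQTime() and getStatus() are both parsed from the responseHeader of the merged response, which is what SOLR-6547 guards):
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "qtime-probe"); // hypothetical id
UpdateResponse rsp = new UpdateRequest().add(doc).process(getRandomClient(), COLLECTION);
int qtime = rsp.getQTime();   // server-side processing time in ms
int status = rsp.getStatus(); // 0 on success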
use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.
the class SolrTestCaseHS method add.
/** Adds a document using the specified client, or to the local test core if null.
 *  Returns the version. TODO: work in progress... version not always returned. */
public static Long add(SolrClient client, SolrInputDocument sdoc, ModifiableSolrParams params) throws Exception {
  if (client == null) {
    Long version = addAndGetVersion(sdoc, params);
    return version;
  } else {
    UpdateRequest updateRequest = new UpdateRequest();
    if (params != null) {
      updateRequest.setParams(params);
    }
    updateRequest.add(sdoc);
    UpdateResponse rsp = updateRequest.process(client);
    // TODO - return version
    return null;
  }
}
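One hedged way the TODO might be resolved, assuming the server honors the versions=true update parameter (which asks the distributed update processor to echo each assigned _version_ under the "adds" key of the response); the parameter name and response shape are assumptions, not something this helper currently relies on:
UpdateRequest req = new UpdateRequest();
if (params != null) {
  req.setParams(params);
}
req.setParam("versions", "true"); // assumed: ask the server to echo assigned versions
req.add(sdoc);
UpdateResponse rsp = req.process(client);
NamedList<?> adds = (NamedList<?>) rsp.getResponse().get("adds"); // assumed response shape: id -> version
return (adds == null) ? null : (Long) adds.get((String) sdoc.getFieldValue("id"));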