
Example 26 with UpdateResponse

use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.

the class TestTolerantUpdateProcessorRandomCloud method testRandomUpdates.

public void testRandomUpdates() throws Exception {
    final int maxDocId = atLeast(10000);
    final BitSet expectedDocIds = new BitSet(maxDocId + 1);
    final int numIters = atLeast(50);
    for (int i = 0; i < numIters; i++) {
        log.info("BEGIN ITER #{}", i);
        final UpdateRequest req = update(params("maxErrors", "-1", "update.chain", "tolerant-chain-max-errors-10"));
        final int numCmds = TestUtil.nextInt(random(), 1, 20);
        final List<ExpectedErr> expectedErrors = new ArrayList<ExpectedErr>(numCmds);
        int expectedErrorsCount = 0;
        // it's ambiguous/confusing in which order mixed DELQ + ADD (or ADD and DELI for the same ID)
        // in the same request will be processed by various clients, so we keep things simple
        // and ensure that no single doc id is affected by more than one command in the same request
        final BitSet docsAffectedThisRequest = new BitSet(maxDocId + 1);
        for (int cmdIter = 0; cmdIter < numCmds; cmdIter++) {
            if ((maxDocId / 2) < docsAffectedThisRequest.cardinality()) {
                // we're already mucking with more than half the docs in the index
                break;
            }
            final boolean causeError = random().nextBoolean();
            if (causeError) {
                expectedErrorsCount++;
            }
            if (random().nextBoolean()) {
                // add a doc
                String id = null;
                SolrInputDocument doc = null;
                if (causeError && (0 == TestUtil.nextInt(random(), 0, 21))) {
                    doc = doc(f("foo_s", "no unique key"));
                    expectedErrors.add(addErr("(unknown)"));
                } else {
                    final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                    docsAffectedThisRequest.set(id_i);
                    id = "id_" + id_i;
                    if (causeError) {
                        expectedErrors.add(addErr(id));
                    } else {
                        expectedDocIds.set(id_i);
                    }
                    final String val = causeError ? "bogus_val" : ("" + TestUtil.nextInt(random(), 42, 666));
                    doc = doc(f("id", id), f("id_i", id_i), f("foo_i", val));
                }
                req.add(doc);
                log.info("ADD: {} = {}", id, doc);
            } else {
                // delete something
                if (random().nextBoolean()) {
                    // delete by id
                    final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                    final String id = "id_" + id_i;
                    final boolean docExists = expectedDocIds.get(id_i);
                    docsAffectedThisRequest.set(id_i);
                    long versionConstraint = docExists ? 1 : -1;
                    if (causeError) {
                        versionConstraint = -1 * versionConstraint;
                        expectedErrors.add(delIErr(id));
                    } else {
                        // if doc exists it will legitimately be deleted
                        expectedDocIds.clear(id_i);
                    }
                    req.deleteById(id, versionConstraint);
                    log.info("DEL: {} = {}", id, causeError ? "ERR" : "OK");
                } else {
                    // delete by query
                    final String q;
                    if (causeError) {
                        // even though our DBQ is gibberish that's going to fail, record a docId as affected
                        // so that we don't generate the same random DBQ and get redundant errors
                        // (problematic because of how DUP forwarded DBQs have to have their errors deduped by TUP)
                        final int id_i = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                        docsAffectedThisRequest.set(id_i);
                        q = "foo_i:[" + id_i + " TO ....giberish";
                        expectedErrors.add(delQErr(q));
                    } else {
                        // ensure our DBQ is only over a range of docs not already affected
                        // by any other cmds in this request
                        final int rangeAxis = randomUnsetBit(random(), docsAffectedThisRequest, maxDocId);
                        final int loBound = docsAffectedThisRequest.previousSetBit(rangeAxis);
                        final int hiBound = docsAffectedThisRequest.nextSetBit(rangeAxis);
                        final int lo = TestUtil.nextInt(random(), loBound + 1, rangeAxis);
                        // hiBound might be negative if there are no set bits above the axis
                        final int hi = TestUtil.nextInt(random(), rangeAxis, (hiBound < 0) ? maxDocId : hiBound - 1);
                        if (lo != hi) {
                            assert lo < hi : "lo=" + lo + " hi=" + hi;
                            // NOTE: clear & set are exclusive of hi, so we use "}" in range query accordingly
                            q = "id_i:[" + lo + " TO " + hi + "}";
                            expectedDocIds.clear(lo, hi);
                            docsAffectedThisRequest.set(lo, hi);
                        } else {
                            // edge case: special case DBQ of one doc
                            assert (lo == rangeAxis && hi == rangeAxis) : "lo=" + lo + " axis=" + rangeAxis + " hi=" + hi;
                            // have to be inclusive of both ends
                            q = "id_i:[" + lo + " TO " + lo + "]";
                            expectedDocIds.clear(lo);
                            docsAffectedThisRequest.set(lo);
                        }
                    }
                    req.deleteByQuery(q);
                    log.info("DEL: {}", q);
                }
            }
        }
        assertEquals("expected error count sanity check: " + req.toString(), expectedErrorsCount, expectedErrors.size());
        final SolrClient client = random().nextBoolean() ? CLOUD_CLIENT : NODE_CLIENTS.get(TestUtil.nextInt(random(), 0, NODE_CLIENTS.size() - 1));
        final UpdateResponse rsp = req.process(client);
        assertUpdateTolerantErrors(client.toString() + " => " + expectedErrors.toString(), rsp, expectedErrors.toArray(new ExpectedErr[expectedErrors.size()]));
        log.info("END ITER #{}, expecting #docs: {}", i, expectedDocIds.cardinality());
        assertEquals("post update commit failed?", 0, CLOUD_CLIENT.commit().getStatus());
        for (int j = 0; j < 5; j++) {
            if (expectedDocIds.cardinality() == countDocs(CLOUD_CLIENT)) {
                break;
            }
            log.info("sleeping to give searchers a chance to re-open #" + j);
            Thread.sleep(200);
        }
        // check the index contents against our expectations
        final BitSet actualDocIds = allDocs(CLOUD_CLIENT, maxDocId);
        if (expectedDocIds.cardinality() != actualDocIds.cardinality()) {
            log.error("cardinality mismatch: expected {} BUT actual {}", expectedDocIds.cardinality(), actualDocIds.cardinality());
        }
        final BitSet x = (BitSet) actualDocIds.clone();
        x.xor(expectedDocIds);
        for (int b = x.nextSetBit(0); 0 <= b; b = x.nextSetBit(b + 1)) {
            final boolean expectedBit = expectedDocIds.get(b);
            final boolean actualBit = actualDocIds.get(b);
            log.error("bit #" + b + " mismatch: expected {} BUT actual {}", expectedBit, actualBit);
        }
        assertEquals(x.cardinality() + " mismatched bits", expectedDocIds.cardinality(), actualDocIds.cardinality());
    }
}
Also used : UpdateResponse(org.apache.solr.client.solrj.response.UpdateResponse) SolrInputDocument(org.apache.solr.common.SolrInputDocument) UpdateRequest(org.apache.solr.client.solrj.request.UpdateRequest) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) SolrClient(org.apache.solr.client.solrj.SolrClient) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) BitSet(java.util.BitSet) ArrayList(java.util.ArrayList) ExpectedErr(org.apache.solr.cloud.TestTolerantUpdateProcessorCloud.ExpectedErr)
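
The test above boils down to one SolrJ pattern: build a single UpdateRequest that mixes adds, versioned delete-by-id, and delete-by-query commands, route it through the tolerant update chain, and inspect the UpdateResponse for per-command errors instead of a top-level failure. Below is a minimal, self-contained sketch of that pattern; the core URL, field names, document ids, and the "tolerant-chain-max-errors-10" chain name are assumptions mirroring the test setup, not something this class defines.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;

public class TolerantUpdateSketch {
    public static void main(String[] args) throws Exception {
        // hypothetical core URL; the test uses cloud/node clients instead
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            UpdateRequest req = new UpdateRequest();
            req.setParam("update.chain", "tolerant-chain-max-errors-10");
            req.setParam("maxErrors", "-1"); // tolerate any number of per-command failures

            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "id_1");
            doc.addField("foo_i", "bogus_val"); // intentionally not parseable as an int
            req.add(doc);
            req.deleteById("id_2", -1L);           // version -1: fails if id_2 already exists
            req.deleteByQuery("foo_i:[10 TO 20}"); // exclusive upper bound, like the test's DBQ

            UpdateResponse rsp = req.process(client);
            // with the tolerant chain the request itself succeeds; failures are reported in the header
            System.out.println("status=" + rsp.getStatus());
            System.out.println("header=" + rsp.getResponseHeader());
        }
    }
}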

Example 27 with UpdateResponse

use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.

the class TestTolerantUpdateProcessorCloud method testAddsMixedWithDeletes.

protected static void testAddsMixedWithDeletes(SolrClient client) throws Exception {
    assertNotNull("client not initialized", client);
    // 3 doc ids, exactly one on shard1
    final String docId1 = S_ONE_PRE + "42";
    final String docId21 = S_TWO_PRE + "42";
    final String docId22 = S_TWO_PRE + "666";
    UpdateResponse rsp = null;
    // add 2 docs, one to each shard
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", docId1), f("foo_i", "2001")), doc(f("id", docId21), f("foo_i", "1976"))).process(client);
    assertEquals(0, rsp.getStatus());
    // add failure on shard2, delete failure on shard1
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", docId22), f("foo_i", "not_a_num"))).deleteById(docId1, -1L).process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("shard2 add fail, shard1 delI fail", rsp, delIErr(docId1, "version conflict"), addErr(docId22, "not_a_num"));
    // attempt a request containing 4 errors of various types (add, delI, delQ)
    for (String maxErrors : new String[] { "4", "-1", "100" }) {
        // for all of these maxErrors values, the overall request should still succeed
        rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", maxErrors, "commit", "true"), doc(f("id", docId22), f("foo_i", "bogus_val"))).deleteById(docId1, -1L).deleteByQuery("malformed:[").deleteById(docId21, -1L).process(client);
        assertEquals(0, rsp.getStatus());
        assertUpdateTolerantErrors("failed variety of updates", rsp, delIErr(docId1, "version conflict"), delQErr("malformed:[", "SyntaxError"), delIErr(docId21, "version conflict"), addErr(docId22, "bogus_val"));
    }
    // attempt a request containing 4 errors of various types (add, delI, delQ) .. 1 too many
    try {
        rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "maxErrors", "3", "commit", "true"), doc(f("id", docId22), f("foo_i", "bogus_val"))).deleteById(docId1, -1L).deleteByQuery("malformed:[").deleteById(docId21, -1L).process(client);
        fail("did not get a top level exception when more then 4 updates failed: " + rsp.toString());
    } catch (SolrException e) {
        // we can't make any reliable assertions about the error message, because
        // it varies based on how the request was routed -- see SOLR-8830
        // likewise, we can't make a firm(er) assertion about the response code...
        assertTrue("not the type of error we were expecting (" + e.code() + "): " + e.toString(), // on a single node setup -- a 5xx type error isn't something we should have triggered
        400 == e.code() || 409 == e.code());
        // verify that the Exceptions metadata can tell us what failed.
        NamedList<String> remoteErrMetadata = e.getMetadata();
        assertNotNull("no metadata in: " + e.toString(), remoteErrMetadata);
        Set<ToleratedUpdateError> actualKnownErrs = new LinkedHashSet<ToleratedUpdateError>(remoteErrMetadata.size());
        int actualKnownErrsCount = 0;
        for (int i = 0; i < remoteErrMetadata.size(); i++) {
            ToleratedUpdateError err = ToleratedUpdateError.parseMetadataIfToleratedUpdateError(remoteErrMetadata.getName(i), remoteErrMetadata.getVal(i));
            if (null == err) {
                // some metadata unrelated to this update processor
                continue;
            }
            actualKnownErrsCount++;
            actualKnownErrs.add(err);
        }
        assertEquals("wrong number of errors in metadata: " + remoteErrMetadata.toString(), 4, actualKnownErrsCount);
        assertEquals("at least one dup error in metadata: " + remoteErrMetadata.toString(), actualKnownErrsCount, actualKnownErrs.size());
    }
    // sanity check our 2 existing docs are still here
    assertQueryDocIds(client, true, docId1, docId21);
    assertQueryDocIds(client, false, docId22);
    // tolerate some failures along with a DELQ that should succeed
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", docId22), f("foo_i", "not_a_num"))).deleteById(docId1, -1L).deleteByQuery("zot_i:[42 to gibberish...").deleteByQuery("foo_i:[50 TO 2000}").process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("mix fails with one valid DELQ", rsp, delIErr(docId1, "version conflict"), delQErr("zot_i:[42 to gibberish..."), addErr(docId22, "not_a_num"));
    // one of our previous docs should have been deleted now
    assertQueryDocIds(client, true, docId1);
    assertQueryDocIds(client, false, docId21, docId22);
}
Also used : UpdateResponse(org.apache.solr.client.solrj.response.UpdateResponse) LinkedHashSet(java.util.LinkedHashSet) Set(java.util.Set) NamedList(org.apache.solr.common.util.NamedList) ToleratedUpdateError(org.apache.solr.common.ToleratedUpdateError) SolrException(org.apache.solr.common.SolrException)
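
Once maxErrors is exceeded the whole request fails, and the catch block above shows where the details live: each tolerated failure is encoded into the SolrException metadata, and parseMetadataIfToleratedUpdateError returns null for unrelated metadata entries. A hedged sketch of a small client-side helper built on that idea (the getter names on ToleratedUpdateError are assumed from its public API):

import org.apache.solr.common.SolrException;
import org.apache.solr.common.ToleratedUpdateError;
import org.apache.solr.common.util.NamedList;

// hypothetical helper: report every tolerated-update failure carried in the exception metadata
static void reportToleratedErrors(SolrException e) {
    NamedList<String> metadata = e.getMetadata();
    if (metadata == null) {
        return; // nothing attached to this exception
    }
    for (int i = 0; i < metadata.size(); i++) {
        // null for metadata entries that weren't written by the tolerant update processor
        ToleratedUpdateError err =
            ToleratedUpdateError.parseMetadataIfToleratedUpdateError(metadata.getName(i), metadata.getVal(i));
        if (err != null) {
            System.out.println(err.getType() + " failed for " + err.getId() + ": " + err.getMessage());
        }
    }
}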

Example 28 with UpdateResponse

use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.

the class TestTolerantUpdateProcessorCloud method testVariousDeletes.

protected static void testVariousDeletes(SolrClient client) throws Exception {
    assertNotNull("client not initialized", client);
    // 2 docs, one on each shard
    final String docId1 = S_ONE_PRE + "42";
    final String docId2 = S_TWO_PRE + "666";
    UpdateResponse rsp = null;
    // add 1 doc to each shard
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true"), doc(f("id", docId1), f("foo_i", "2001")), doc(f("id", docId2), f("foo_i", "1976"))).process(client);
    assertEquals(0, rsp.getStatus());
    // attempt to delete individual doc id(s) that should fail because of optimistic concurrency constraints
    for (String id : new String[] { docId1, docId2 }) {
        rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteById(id, -1L).process(client);
        assertEquals(0, rsp.getStatus());
        assertUpdateTolerantErrors("failed opportunistic concurrent delId=" + id, rsp, delIErr(id));
    }
    // multiple failed deletes from the same shard (via optimistic concurrency w/ bogus ids)
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteById(S_ONE_PRE + "X", +1L).deleteById(S_ONE_PRE + "Y", +1L).process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed opportunistic concurrent delete by id for 2 bogus docs", rsp, delIErr(S_ONE_PRE + "X"), delIErr(S_ONE_PRE + "Y"));
    assertQueryDocIds(client, true, docId1, docId2);
    // multiple failed deletes from different shards due to optimistic concurrency constraints
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteById(docId2, -1L).deleteById(docId1, -1L).process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed opportunistic concurrent delete by id for 2 docs", rsp, delIErr(docId1), delIErr(docId2));
    assertQueryDocIds(client, true, docId1, docId2);
    // deleteByQuery using malformed query (fail)
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteByQuery("bogus_field:foo").process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed opportunistic concurrent delete by query", rsp, delQErr("bogus_field:foo"));
    assertQueryDocIds(client, true, docId1, docId2);
    // mix 2 deleteByQuery, one malformed (fail), one that doesn't match anything (ok)
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteByQuery("bogus_field:foo").deleteByQuery("foo_i:23").process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed opportunistic concurrent delete by query", rsp, delQErr("bogus_field:foo"));
    assertQueryDocIds(client, true, docId1, docId2);
    // mix 2 deleteById using _version_=-1, one for real doc1 (fail), one for bogus id (ok)
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteById(docId1, -1L).deleteById("bogus", -1L).process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed opportunistic concurrent delete by id: exists", rsp, delIErr(docId1));
    assertQueryDocIds(client, true, docId1, docId2);
    // mix 2 deleteById using _version_=1, one for real doc1 (ok, deleted), one for bogus id (fail)
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteById(docId1, +1L).deleteById("bogusId", +1L).process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed opportunistic concurrent delete by id: bogus", rsp, delIErr("bogusId"));
    assertQueryDocIds(client, false, docId1);
    assertQueryDocIds(client, true, docId2);
    // mix 2 deleteByQuery, one malformed (fail), one that actually removes some docs (ok)
    assertQueryDocIds(client, true, docId2);
    rsp = update(params("update.chain", "tolerant-chain-max-errors-10", "commit", "true")).deleteByQuery("bogus_field:foo").deleteByQuery("foo_i:1976").process(client);
    assertEquals(0, rsp.getStatus());
    assertUpdateTolerantErrors("failed opportunistic concurrent delete by query", rsp, delQErr("bogus_field:foo"));
    assertQueryDocIds(client, false, docId2);
}
Also used : UpdateResponse(org.apache.solr.client.solrj.response.UpdateResponse)
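
The version constraints passed to deleteById throughout this test are Solr's optimistic concurrency rules: a negative _version_ such as -1 asserts the document must not exist, _version_=1 asserts it must exist at some version, and a larger positive value must match the current version exactly. A minimal sketch of the two guarded deletes; the ids are hypothetical and the same tolerant chain is assumed so that violated constraints surface as tolerated errors rather than an exception:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;

// hypothetical helper: issue two deletes guarded by optimistic concurrency constraints
static UpdateResponse guardedDeletes(SolrClient client) throws Exception {
    UpdateRequest req = new UpdateRequest();
    req.setParam("update.chain", "tolerant-chain-max-errors-10");
    req.deleteById("existing_id", +1L); // succeeds only if the doc exists (any version)
    req.deleteById("missing_id", -1L);  // succeeds only if the doc does NOT exist
    return req.process(client);         // constraint violations appear as tolerated errors
}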

Example 29 with UpdateResponse

use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.

the class TestInPlaceUpdatesDistrib method reorderedDBQIndividualReplicaTest.

private void reorderedDBQIndividualReplicaTest() throws Exception {
    if (onlyLeaderIndexes) {
        log.info("RTG with DBQs are not working in tlog replicas");
        return;
    }
    clearIndex();
    commit();
    // put replica out of sync
    float newinplace_updatable_float = 100;
    long version0 = 2000;
    List<UpdateRequest> updates = new ArrayList<>();
    // full update
    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new",
            "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1));
    // in-place update: inplace_updatable_float=101
    updates.add(simulatedUpdateRequest(version0 + 1, "id", 0,
            "inplace_updatable_float", newinplace_updatable_float + 1, "_version_", version0 + 2));
    updates.add(simulatedDeleteRequest("inplace_updatable_float:" + (newinplace_updatable_float + 1), version0 + 3));
    // Reordering needs to happen using parallel threads
    ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
    // re-order the updates by swapping the last two
    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
    reorderedUpdates.set(1, updates.get(2));
    reorderedUpdates.set(2, updates.get(1));
    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
    for (UpdateRequest update : reorderedUpdates) {
        AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
        updateResponses.add(threadpool.submit(task));
        // we can't guarantee/trust what order the updates are executed in, since multiple threads
        // are involved, but we try to bias the thread scheduling to run them in the order submitted
        Thread.sleep(100);
    }
    threadpool.shutdown();
    assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
    // assert all requests were successful
    for (Future<UpdateResponse> resp : updateResponses) {
        assertEquals(0, resp.get().getStatus());
    }
    SolrDocument doc = NONLEADERS.get(0).getById(String.valueOf(0), params("distrib", "false"));
    assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
    log.info("reorderedDBQIndividualReplicaTest: This test passed fine...");
    clearIndex();
    commit();
}
Also used : UpdateRequest(org.apache.solr.client.solrj.request.UpdateRequest) ArrayList(java.util.ArrayList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) UpdateResponse(org.apache.solr.client.solrj.response.UpdateResponse) SolrDocument(org.apache.solr.common.SolrDocument) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future)
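
The reordering in this test is driven purely by concurrent submission: each UpdateRequest is handed to its own worker thread and the resulting Future<UpdateResponse> handles are collected and checked afterwards. A hedged sketch of that submit-and-await pattern with plain java.util.concurrent; the test itself uses Solr's ExecutorUtil and DefaultSolrThreadFactory, and its AsyncUpdateWithRandomCommit task also issues random commits, which this sketch omits:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;

// hypothetical helper: submit each update on its own thread, then verify every response
static void submitConcurrently(List<UpdateRequest> updates, SolrClient client) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(updates.size());
    List<Future<UpdateResponse>> futures = new ArrayList<>();
    for (UpdateRequest update : updates) {
        futures.add(pool.submit(() -> update.process(client)));
        Thread.sleep(100); // bias (but never guarantee) execution in submission order
    }
    pool.shutdown();
    if (!pool.awaitTermination(15, TimeUnit.SECONDS)) {
        throw new IllegalStateException("thread pool did not terminate");
    }
    for (Future<UpdateResponse> f : futures) {
        if (f.get().getStatus() != 0) {
            throw new IllegalStateException("update failed: " + f.get());
        }
    }
}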

Example 30 with UpdateResponse

use of org.apache.solr.client.solrj.response.UpdateResponse in project lucene-solr by apache.

the class TestInPlaceUpdatesDistrib method outOfOrderUpdatesIndividualReplicaTest.

private void outOfOrderUpdatesIndividualReplicaTest() throws Exception {
    clearIndex();
    commit();
    buildRandomIndex(0);
    float inplace_updatable_float = 1;
    // update doc, set
    index("id", 0, "inplace_updatable_float", map("set", inplace_updatable_float));
    LEADER.commit();
    // RTG straight from the index
    SolrDocument sdoc = LEADER.getById("0");
    assertEquals(inplace_updatable_float, sdoc.get("inplace_updatable_float"));
    assertEquals("title0", sdoc.get("title_s"));
    long version0 = (long) sdoc.get("_version_");
    // put replica out of sync
    float newinplace_updatable_float = 100;
    List<UpdateRequest> updates = new ArrayList<>();
    // full update
    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1));
    for (int i = 1; i < atLeast(3); i++) {
        updates.add(simulatedUpdateRequest(version0 + i, "id", 0, "inplace_updatable_float", newinplace_updatable_float + i, "_version_", version0 + i + 1));
    }
    // order the updates correctly for NONLEADER 1
    for (UpdateRequest update : updates) {
        log.info("Issuing well ordered update: " + update.getDocuments());
        NONLEADERS.get(1).request(update);
    }
    // Reordering needs to happen using parallel threads, since some of these updates will
    // be blocking calls, waiting for some previous updates to arrive on which they depend.
    ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
    // re-order the updates for NONLEADER 0
    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
    Collections.shuffle(reorderedUpdates, r);
    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
    for (UpdateRequest update : reorderedUpdates) {
        AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
        updateResponses.add(threadpool.submit(task));
        // we can't guarantee/trust what order the updates are executed in, since multiple threads
        // are involved, but we try to bias the thread scheduling to run them in the order submitted
        Thread.sleep(10);
    }
    threadpool.shutdown();
    assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
    // assert all requests were successful
    for (Future<UpdateResponse> resp : updateResponses) {
        assertEquals(0, resp.get().getStatus());
    }
    // assert both replicas have same effect
    for (SolrClient client : NONLEADERS) {
        // 0th is re-ordered replica, 1st is well-ordered replica
        log.info("Testing client: " + ((HttpSolrClient) client).getBaseURL());
        assertReplicaValue(client, 0, "inplace_updatable_float", (newinplace_updatable_float + (float) (updates.size() - 1)), "inplace_updatable_float didn't match for replica at client: " + ((HttpSolrClient) client).getBaseURL());
        assertReplicaValue(client, 0, "title_s", "title0_new", "Title didn't match for replica at client: " + ((HttpSolrClient) client).getBaseURL());
        assertEquals(version0 + updates.size(), getReplicaValue(client, 0, "_version_"));
    }
    log.info("outOfOrderUpdatesIndividualReplicaTest: This test passed fine...");
}
Also used : UpdateRequest(org.apache.solr.client.solrj.request.UpdateRequest) ArrayList(java.util.ArrayList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) UpdateResponse(org.apache.solr.client.solrj.response.UpdateResponse) SolrDocument(org.apache.solr.common.SolrDocument) SolrClient(org.apache.solr.client.solrj.SolrClient) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future)
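
The index("id", 0, "inplace_updatable_float", map("set", ...)) call above is the test's shorthand for an atomic update; in plain SolrJ the same "set" operation is expressed by giving the field a map value. A minimal sketch with hypothetical field names; whether Solr can apply it as a true in-place (docValues-only) update depends on the field being non-indexed, non-stored, single-valued, and docValues-enabled in the schema:

import java.util.Collections;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;

// hypothetical helper: atomic "set" of a float field on an existing document
static UpdateResponse setFloatField(SolrClient client, String id, float value) throws Exception {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", id);
    // "set" replaces the field's value without resending the rest of the document
    doc.addField("inplace_updatable_float", Collections.singletonMap("set", value));
    UpdateRequest req = new UpdateRequest();
    req.add(doc);
    return req.process(client);
}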

Aggregations

UpdateResponse (org.apache.solr.client.solrj.response.UpdateResponse) 43
SolrInputDocument (org.apache.solr.common.SolrInputDocument) 17
UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest) 16
IOException (java.io.IOException) 13
SolrServerException (org.apache.solr.client.solrj.SolrServerException) 12
ArrayList (java.util.ArrayList) 10
SolrClient (org.apache.solr.client.solrj.SolrClient) 8
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient) 8
SolrDocument (org.apache.solr.common.SolrDocument) 8
NamedList (org.apache.solr.common.util.NamedList) 7
Future (java.util.concurrent.Future) 6
Test (org.junit.Test) 6
ExecutorService (java.util.concurrent.ExecutorService) 5
SolrException (org.apache.solr.common.SolrException) 5
DefaultSolrThreadFactory (org.apache.solr.util.DefaultSolrThreadFactory) 5
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams) 4
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient) 3
QueryResponse (org.apache.solr.client.solrj.response.QueryResponse) 3
MalformedURLException (java.net.MalformedURLException) 2
HashMap (java.util.HashMap) 2