Example 21 with DefaultSolrThreadFactory

use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.

the class TestInPlaceUpdatesDistrib method reorderedDeletesTest.

// The following should work: full update to doc 0, in-place update for doc 0, delete doc 0
private void reorderedDeletesTest() throws Exception {
    clearIndex();
    commit();
    buildRandomIndex(0);
    float inplace_updatable_float = 1;
    // update doc, set
    index("id", 0, "inplace_updatable_float", map("set", inplace_updatable_float));
    LEADER.commit();
    // RTG straight from the index
    SolrDocument sdoc = LEADER.getById("0");
    assertEquals(inplace_updatable_float, sdoc.get("inplace_updatable_float"));
    assertEquals("title0", sdoc.get("title_s"));
    long version0 = (long) sdoc.get("_version_");
    // put replica out of sync
    float newinplace_updatable_float = 100;
    List<UpdateRequest> updates = new ArrayList<>();
    // full update
    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", "inplace_updatable_float", newinplace_updatable_float, "_version_", version0 + 1));
    // inplace_updatable_float=101
    updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, "inplace_updatable_float", newinplace_updatable_float + 1, "_version_", version0 + 2));
    updates.add(simulatedDeleteRequest(0, version0 + 3));
    // order the updates correctly for NONLEADER 1
    for (UpdateRequest update : updates) {
        log.info("Issuing well ordered update: " + update.getDocuments());
        NONLEADERS.get(1).request(update);
    }
    // Reordering needs to happen using parallel threads
    ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
    // re-order the updates for NONLEADER 0
    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
    Collections.shuffle(reorderedUpdates, r);
    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
    for (UpdateRequest update : reorderedUpdates) {
        AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
        updateResponses.add(threadpool.submit(task));
        // we can't guarantee/trust what order the updates are executed in, since multiple threads
        // are involved, but we try to bias the thread scheduling to run them in the order submitted
        Thread.sleep(10);
    }
    threadpool.shutdown();
    assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
    // assert all requests were successful
    for (Future<UpdateResponse> resp : updateResponses) {
        assertEquals(0, resp.get().getStatus());
    }
    // assert both replicas have same effect
    for (SolrClient client : NONLEADERS) {
        // 0th is re-ordered replica, 1st is well-ordered replica
        SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
        assertNull("This doc was supposed to have been deleted, but was: " + doc, doc);
    }
    log.info("reorderedDeletesTest: This test passed fine...");
}
Also used : UpdateRequest(org.apache.solr.client.solrj.request.UpdateRequest) ArrayList(java.util.ArrayList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) UpdateResponse(org.apache.solr.client.solrj.response.UpdateResponse) SolrDocument(org.apache.solr.common.SolrDocument) SolrClient(org.apache.solr.client.solrj.SolrClient) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future)
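The AsyncUpdateWithRandomCommit task used above is a helper defined elsewhere in TestInPlaceUpdatesDistrib. A minimal sketch of its likely shape, reconstructed from the call site (the constructor arguments and the random-commit behavior are assumptions, not copied source):

// Sketch only; assumes java.util.Random, java.util.concurrent.Callable,
// org.apache.solr.client.solrj.SolrClient, UpdateRequest and UpdateResponse are imported
private static class AsyncUpdateWithRandomCommit implements Callable<UpdateResponse> {
    private final UpdateRequest update;
    private final SolrClient client;
    private final Random random;

    AsyncUpdateWithRandomCommit(UpdateRequest update, SolrClient client, long seed) {
        this.update = update;
        this.client = client;
        // seeded from the call site so a failing reorder is reproducible
        this.random = new Random(seed);
    }

    @Override
    public UpdateResponse call() throws Exception {
        UpdateResponse response = update.process(client);
        // occasionally commit so reordered updates are also exercised across commits
        if (random.nextInt(3) == 0) {
            client.commit();
        }
        return response;
    }
}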

Example 22 with DefaultSolrThreadFactory

use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.

the class TestStressCloudBlindAtomicUpdates method createMiniSolrCloudCluster.

@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
    // NOTE: numDocsToCheck uses atLeast, so nightly & multiplier are already a factor in index size
    // no need to redundantly factor them in here as well
    DOC_ID_INCR = TestUtil.nextInt(random(), 1, 7);
    NUM_THREADS = atLeast(3);
    EXEC_SERVICE = ExecutorUtil.newMDCAwareFixedThreadPool(NUM_THREADS, new DefaultSolrThreadFactory(DEBUG_LABEL));
    // at least 2, but don't go crazy on nightly/test.multiplier with "atLeast()"
    final int numShards = TEST_NIGHTLY ? 5 : 2;
    final int repFactor = 2;
    final int numNodes = numShards * repFactor;
    final String configName = DEBUG_LABEL + "_config-set";
    final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
    configureCluster(numNodes).addConfig(configName, configDir).configure();
    CLOUD_CLIENT = cluster.getSolrClient();
    CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
    CollectionAdminRequest.createCollection(COLLECTION_NAME, configName, numShards, repFactor).withProperty("config", "solrconfig-tlog.xml").withProperty("schema", "schema-minimal-atomic-stress.xml").process(CLOUD_CLIENT);
    waitForRecoveriesToFinish(CLOUD_CLIENT);
    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
        CLIENTS.add(getHttpSolrClient(jetty.getBaseUrl() + "/" + COLLECTION_NAME + "/"));
    }
    // sanity check no one broke the assumptions we make about our schema
    checkExpectedSchemaType(map("name", "long", "class", "solr.TrieLongField", "multiValued", Boolean.FALSE, "indexed", Boolean.FALSE, "stored", Boolean.FALSE, "docValues", Boolean.FALSE));
}
Also used : Path(java.nio.file.Path) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) BeforeClass(org.junit.BeforeClass)
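The matching teardown is not shown on this page; a class-scoped pool like EXEC_SERVICE is normally shut down in an @AfterClass hook. A minimal sketch, assuming the usual ExecutorUtil helper and that CLIENTS holds HttpSolrClient instances:

@AfterClass
private static void shutdownExecutorAndClients() throws Exception {
    if (EXEC_SERVICE != null) {
        // shuts the pool down and blocks until submitted tasks have finished
        ExecutorUtil.shutdownAndAwaitTermination(EXEC_SERVICE);
        EXEC_SERVICE = null;
    }
    for (HttpSolrClient client : CLIENTS) {
        client.close();
    }
    CLIENTS.clear();
}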

Example 23 with DefaultSolrThreadFactory

use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.

the class EmptyRequestHandler method testRefCountMT.

@Test
public void testRefCountMT() throws Exception {
    SolrCore core = h.getCore();
    assertTrue("Refcount != 1", core.getOpenCount() == 1);
    final ClosingRequestHandler handler1 = new ClosingRequestHandler();
    handler1.inform(core);
    String path = "/this/is A path /that won't be registered!";
    SolrRequestHandler old = core.registerRequestHandler(path, handler1);
    // should not be anything...
    assertNull(old);
    assertEquals(core.getRequestHandlers().get(path), handler1);
    final int LOOP = 100;
    final int MT = 16;
    ExecutorService service = ExecutorUtil.newMDCAwareFixedThreadPool(MT, new DefaultSolrThreadFactory("refCountMT"));
    List<Callable<Integer>> callees = new ArrayList<>(MT);
    final CoreContainer cores = h.getCoreContainer();
    for (int i = 0; i < MT; ++i) {
        Callable<Integer> call = new Callable<Integer>() {

            void yield(int n) {
                try {
                    Thread.sleep(0, (n % 13 + 1) * 10);
                } catch (InterruptedException xint) {
                }
            }

            @Override
            public Integer call() {
                SolrCore core = null;
                int r = 0;
                try {
                    for (int l = 0; l < LOOP; ++l) {
                        r += 1;
                        core = cores.getCore(SolrTestCaseJ4.DEFAULT_TEST_CORENAME);
                        // sprinkle concurrency hinting...
                        yield(l);
                        assertTrue("Refcount < 1", core.getOpenCount() >= 1);
                        yield(l);
                        assertTrue("Refcount > 17", core.getOpenCount() <= 17);
                        yield(l);
                        assertTrue("Handler is closed", handler1.closed == false);
                        yield(l);
                        core.close();
                        core = null;
                        yield(l);
                    }
                    return r;
                } finally {
                    if (core != null)
                        core.close();
                }
            }
        };
        callees.add(call);
    }
    List<Future<Integer>> results = service.invokeAll(callees);
    for (Future<Integer> result : results) {
        assertTrue("loop=" + result.get() + " < " + LOOP, result.get() >= LOOP);
    }
    cores.shutdown();
    assertTrue("Refcount != 0", core.getOpenCount() == 0);
    assertTrue("Handler not closed", core.isClosed() && handler1.closed == true);
    service.shutdown();
    assertTrue("Running for too long...", service.awaitTermination(60, TimeUnit.SECONDS));
}
Also used : ArrayList(java.util.ArrayList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) Callable(java.util.concurrent.Callable) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) SolrRequestHandler(org.apache.solr.request.SolrRequestHandler) Test(org.junit.Test)
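ClosingRequestHandler is a small helper declared alongside this test; a sketch of its probable shape, inferred from handler1.inform(core) and the handler1.closed assertions above (the CloseHook wiring is an assumption):

// Sketch: a request handler that records when its core is closed.
class ClosingRequestHandler extends EmptyRequestHandler implements SolrCoreAware {

    volatile boolean closed = false;

    @Override
    public void inform(SolrCore core) {
        core.addCloseHook(new CloseHook() {

            @Override
            public void preClose(SolrCore core) {
                closed = true;
            }

            @Override
            public void postClose(SolrCore core) {
            }
        });
    }
}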

Example 24 with DefaultSolrThreadFactory

use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.

the class EmptyRequestHandler method testReloadLeak.

/**
   * Test that's meant to be run with many iterations to expose a leak of SolrIndexSearcher when a core is closed
   * due to a reload. Without the fix, this test fails on most runs with iters=1000.
   */
@Test
public void testReloadLeak() throws Exception {
    final ExecutorService executor = ExecutorUtil.newMDCAwareFixedThreadPool(1, new DefaultSolrThreadFactory("testReloadLeak"));
    // Continuously open new searchers while the core is not closed, and reload the core to try to reproduce the
    // searcher leak. While in practice we never continuously open new searchers, opening a searcher on this empty
    // core is very fast, so doing it in a tight loop increases the likelihood of hitting the race.
    SolrCore core = h.getCore();
    assertTrue("Refcount != 1", core.getOpenCount() == 1);
    executor.execute(new NewSearcherRunnable(core));
    // Since we called getCore() vs getCoreInc() and don't own a refCount, the container should decRef the core
    // and close it when we call reload.
    h.reload();
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.MINUTES);
    // Check that all cores are closed and no searcher references are leaked.
    assertTrue("SolrCore " + core + " is not closed", core.isClosed());
    assertTrue(core.areAllSearcherReferencesEmpty());
}
Also used : ExecutorService(java.util.concurrent.ExecutorService) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) Test(org.junit.Test)
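NewSearcherRunnable is another helper from the same test class; a plausible sketch based on the usage above (the loop condition and error handling are assumptions):

// Sketch: keep opening, and immediately releasing, new searchers until the core closes.
private static class NewSearcherRunnable implements Runnable {

    private final SolrCore core;

    NewSearcherRunnable(SolrCore core) {
        this.core = core;
    }

    @Override
    public void run() {
        while (!core.isClosed()) {
            try {
                RefCounted<SolrIndexSearcher> searcher = core.openNewSearcher(true, true);
                searcher.decref();
            } catch (SolrException e) {
                // opening a searcher can fail while the core is shutting down; that is expected here
                if (!core.isClosed()) {
                    throw e;
                }
            }
        }
    }
}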

Example 25 with DefaultSolrThreadFactory

use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.

the class IndexFetcher method fetchLatestIndex.

/**
   * This command downloads all the necessary files from master to install an index commit point. Only changed files are
   * downloaded. It also downloads the conf files (if they are modified).
   *
   * @param forceReplication force a replication in all cases
   * @param forceCoreReload force a core reload in all cases
   * @return an IndexFetchResult describing whether the fetch succeeded, failed, or was skipped because the slave is already in sync
   * @throws IOException if an exception occurs
   */
IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreReload) throws IOException, InterruptedException {
    boolean cleanupDone = false;
    boolean successfulInstall = false;
    markReplicationStart();
    Directory tmpIndexDir = null;
    String tmpIndex;
    Directory indexDir = null;
    String indexDirPath;
    boolean deleteTmpIdxDir = true;
    File tmpTlogDir = null;
    if (!solrCore.getSolrCoreState().getLastReplicateIndexSuccess()) {
        // if the last replication was not a success, we force a full replication
        // when we are a bit more confident we may want to try a partial replication
        // if the error is connection related or something, but we have to be careful
        forceReplication = true;
        LOG.info("Last replication failed, so I'll force replication");
    }
    try {
        if (fetchFromLeader) {
            assert !solrCore.isClosed() : "Replication should be stopped before closing the core";
            Replica replica = getLeaderReplica();
            CloudDescriptor cd = solrCore.getCoreDescriptor().getCloudDescriptor();
            if (cd.getCoreNodeName().equals(replica.getName())) {
                return IndexFetchResult.EXPECTING_NON_LEADER;
            }
            if (replica.getState() != Replica.State.ACTIVE) {
                LOG.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
                return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
            }
            if (!solrCore.getCoreContainer().getZkController().getClusterState().liveNodesContain(replica.getNodeName())) {
                LOG.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
                return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
            }
            if (!replica.getCoreUrl().equals(masterUrl)) {
                masterUrl = replica.getCoreUrl();
                LOG.info("Updated masterUrl to {}", masterUrl);
            // TODO: Do we need to set forceReplication = true?
            } else {
                LOG.debug("masterUrl didn't change");
            }
        }
        //get the current 'replicateable' index version in the master
        NamedList response;
        try {
            response = getLatestVersion();
        } catch (Exception e) {
            final String errorMsg = e.toString();
            if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) {
                LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
                return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e);
            } else {
                LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
                return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
            }
        }
        long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
        long latestGeneration = (Long) response.get(GENERATION);
        LOG.info("Master's generation: " + latestGeneration);
        LOG.info("Master's version: " + latestVersion);
        // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
        IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
        if (commit == null) {
            // Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been updated with commit points
            RefCounted<SolrIndexSearcher> searcherRefCounted = null;
            try {
                searcherRefCounted = solrCore.getNewestSearcher(false);
                if (searcherRefCounted == null) {
                    LOG.warn("No open searcher found - fetch aborted");
                    return IndexFetchResult.NO_INDEX_COMMIT_EXIST;
                }
                commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
            } finally {
                if (searcherRefCounted != null)
                    searcherRefCounted.decref();
            }
        }
        LOG.info("Slave's generation: " + commit.getGeneration());
        LOG.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
        if (latestVersion == 0L) {
            if (forceReplication && commit.getGeneration() != 0) {
                // since we won't get the files for an empty index,
                // we just clear ours and commit
                LOG.info("New index in Master. Deleting mine...");
                RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore);
                try {
                    iw.get().deleteAll();
                } finally {
                    iw.decref();
                }
                SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams());
                solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
            }
            //there is nothing to be replicated
            successfulInstall = true;
            LOG.debug("Nothing to replicate, master's version is 0");
            return IndexFetchResult.MASTER_VERSION_ZERO;
        }
        // TODO: Should we be comparing timestamps (across machines) here?
        if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
            //master and slave are already in sync just return
            LOG.info("Slave in sync with master.");
            successfulInstall = true;
            return IndexFetchResult.ALREADY_IN_SYNC;
        }
        LOG.info("Starting replication process");
        // get the list of files first
        fetchFileList(latestGeneration);
        // this can happen if the commit point is deleted before we fetch the file list.
        if (filesToDownload.isEmpty()) {
            return IndexFetchResult.PEER_INDEX_COMMIT_DELETED;
        }
        LOG.info("Number of files in latest index in master: " + filesToDownload.size());
        if (tlogFilesToDownload != null) {
            LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
        }
        // Create the sync service
        fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("fsyncService"));
        // use a synchronized list because the list is read by other threads (to show details)
        filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
        // if the generation of master is older than that of the slave, they are not compatible to be copied over;
        // a new index directory needs to be created and all the files copied
        boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion || commit.getGeneration() >= latestGeneration || forceReplication;
        String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
        String tmpIdxDirName = "index." + timestamp;
        tmpIndex = solrCore.getDataDir() + tmpIdxDirName;
        tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndex, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
        // tmp dir for tlog files
        if (tlogFilesToDownload != null) {
            tmpTlogDir = new File(solrCore.getUpdateHandler().getUpdateLog().getLogDir(), "tlog." + timestamp);
        }
        // current index dir...
        indexDirPath = solrCore.getIndexDir();
        indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
        try {
            // we compare the index files from the master with the index files on disk to check for a mismatch
            // in the metadata. If there is a mismatch for the same index file then we download the entire index again.
            if (!isFullCopyNeeded && isIndexStale(indexDir)) {
                isFullCopyNeeded = true;
            }
            if (!isFullCopyNeeded && !fetchFromLeader) {
                // close the current searcher so the index files it references can be released,
                // and wait until we are able to clean up all unused lucene files
                if (solrCore.getCoreContainer().isZooKeeperAware()) {
                    solrCore.closeSearcher();
                }
                // rollback and reopen index writer and wait until all unused files
                // are successfully deleted
                solrCore.getUpdateHandler().newIndexWriter(true);
                RefCounted<IndexWriter> writer = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
                try {
                    IndexWriter indexWriter = writer.get();
                    int c = 0;
                    indexWriter.deleteUnusedFiles();
                    while (hasUnusedFiles(indexDir, commit)) {
                        indexWriter.deleteUnusedFiles();
                        LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
                        Thread.sleep(1000);
                        c++;
                        if (c >= 30) {
                            LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
                            isFullCopyNeeded = true;
                            break;
                        }
                    }
                    if (c > 0) {
                        LOG.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
                    }
                } finally {
                    writer.decref();
                }
            }
            boolean reloadCore = false;
            try {
                // we have to be careful and do this after we know isFullCopyNeeded won't be flipped
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
                }
                LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
                successfulInstall = false;
                long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir, tmpIndexDir, latestGeneration);
                if (tlogFilesToDownload != null) {
                    bytesDownloaded += downloadTlogFiles(tmpTlogDir, latestGeneration);
                    // reload update log
                    reloadCore = true;
                }
                final long timeTakenSeconds = getReplicationTimeElapsed();
                final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0 ? new Long(bytesDownloaded / timeTakenSeconds) : null);
                LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}", isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);
                Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
                if (!modifiedConfFiles.isEmpty()) {
                    reloadCore = true;
                    downloadConfFiles(confFilesToDownload, latestGeneration);
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        if (successfulInstall)
                            deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        if (isFullCopyNeeded) {
                            // may be closed
                            if (indexDir != null) {
                                solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                                // Cleanup all index files not associated with any *named* snapshot.
                                solrCore.deleteNonSnapshotIndexFiles(indexDirPath);
                            }
                        }
                        LOG.info("Configuration files are modified, core will be reloaded");
                        // write to a file the time of replication and the conf files replicated
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                } else {
                    terminateAndWaitFsyncService();
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        if (successfulInstall)
                            deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                }
            } finally {
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(solrCore);
                }
            }
            // we must reload the core after we open the IW back up
            if (successfulInstall && (reloadCore || forceCoreReload)) {
                LOG.info("Reloading SolrCore {}", solrCore.getName());
                reloadCore();
            }
            if (successfulInstall) {
                if (isFullCopyNeeded) {
                    // may be closed
                    if (indexDir != null) {
                        LOG.info("removing old index directory " + indexDir);
                        solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                        solrCore.getDirectoryFactory().remove(indexDir);
                    }
                }
                if (isFullCopyNeeded) {
                    solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
                }
                openNewSearcherAndUpdateCommitPoint();
            }
            if (!isFullCopyNeeded && !forceReplication && !successfulInstall) {
                cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
                cleanupDone = true;
                // we try with a full copy of the index
                LOG.warn("Replication attempt was not successful - trying a full index replication reloadCore={}", reloadCore);
                successfulInstall = fetchLatestIndex(true, reloadCore).getSuccessful();
            }
            markReplicationStop();
            return successfulInstall ? IndexFetchResult.INDEX_FETCH_SUCCESS : IndexFetchResult.INDEX_FETCH_FAILURE;
        } catch (ReplicationHandlerException e) {
            LOG.error("User aborted Replication");
            return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
        } catch (SolrException e) {
            throw e;
        } catch (InterruptedException e) {
            throw new InterruptedException("Index fetch interrupted");
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
        }
    } finally {
        if (!cleanupDone) {
            cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
        }
    }
}
Also used : ArrayList(java.util.ArrayList) CommitUpdateCommand(org.apache.solr.update.CommitUpdateCommand) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) SolrException(org.apache.solr.common.SolrException) Directory(org.apache.lucene.store.Directory) NamedList(org.apache.solr.common.util.NamedList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) NoSuchFileException(java.nio.file.NoSuchFileException) SolrServerException(org.apache.solr.client.solrj.SolrServerException) SolrException(org.apache.solr.common.SolrException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) IndexCommit(org.apache.lucene.index.IndexCommit) Date(java.util.Date) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) IndexWriter(org.apache.lucene.index.IndexWriter) File(java.io.File) SimpleDateFormat(java.text.SimpleDateFormat) Map(java.util.Map) HashMap(java.util.HashMap)
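The fsyncService created above is drained by terminateAndWaitFsyncService() before downloaded files are moved into place. A sketch of what that helper plausibly does (the timeout value and the fsyncException field are assumptions about IndexFetcher internals):

// Sketch: shut down the fsync pool and surface any fsync failure on the calling thread.
private void terminateAndWaitFsyncService() throws Exception {
    if (fsyncService.isTerminated())
        return;
    fsyncService.shutdown();
    // fsyncs of large index files can take a while; wait generously
    fsyncService.awaitTermination(3600, TimeUnit.SECONDS);
    // if an fsync task failed on a pool thread, rethrow its exception here
    Exception fsyncExceptionCopy = fsyncException;
    if (fsyncExceptionCopy != null)
        throw fsyncExceptionCopy;
}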

Aggregations

DefaultSolrThreadFactory (org.apache.solr.util.DefaultSolrThreadFactory): 32 usages
ExecutorService (java.util.concurrent.ExecutorService): 19 usages
ArrayList (java.util.ArrayList): 18 usages
Future (java.util.concurrent.Future): 10 usages
SolrException (org.apache.solr.common.SolrException): 9 usages
SolrClient (org.apache.solr.client.solrj.SolrClient): 8 usages
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 8 usages
IOException (java.io.IOException): 7 usages
UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest): 7 usages
Test (org.junit.Test): 7 usages
ExecutionException (java.util.concurrent.ExecutionException): 6 usages
SolrDocument (org.apache.solr.common.SolrDocument): 6 usages
UpdateResponse (org.apache.solr.client.solrj.response.UpdateResponse): 5 usages
ExecutorUtil (org.apache.solr.common.util.ExecutorUtil): 5 usages
Callable (java.util.concurrent.Callable): 4 usages
SynchronousQueue (java.util.concurrent.SynchronousQueue): 4 usages
SolrServerException (org.apache.solr.client.solrj.SolrServerException): 4 usages
ClusterState (org.apache.solr.common.cloud.ClusterState): 4 usages
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 3 usages
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 3 usages
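The common thread across these aggregations is one small pattern: pair DefaultSolrThreadFactory, which gives pool threads a recognizable name prefix, with an ExecutorUtil factory method so the MDC logging context propagates into pool threads. A minimal standalone sketch of that pattern (class and pool names are illustrative):

import java.util.concurrent.ExecutorService;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.util.DefaultSolrThreadFactory;

public class ThreadFactoryExample {

    public static void main(String[] args) {
        // MDC-aware fixed pool; threads are named with the "my-task" prefix,
        // which makes thread dumps and log lines easier to attribute
        ExecutorService pool = ExecutorUtil.newMDCAwareFixedThreadPool(4, new DefaultSolrThreadFactory("my-task"));
        try {
            pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        } finally {
            // shut down and block until submitted tasks complete
            ExecutorUtil.shutdownAndAwaitTermination(pool);
        }
    }
}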