use of org.apache.solr.util.DefaultSolrThreadFactory in project Xponents by OpenSextant.
the class CoreContainer method load.
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
* Load the cores defined for this CoreContainer
*/
public void load() {
    log.info("Loading cores into CoreContainer [instanceDir={}]", loader.getInstanceDir());
    // add the sharedLib to the shared resource loader before initializing cfg based plugins
    String libDir = cfg.getSharedLibDirectory();
    if (libDir != null) {
        File f = FileUtils.resolvePath(new File(solrHome), libDir);
        log.info("loading shared library: " + f.getAbsolutePath());
        loader.addToClassLoader(libDir, null, false);
        loader.reloadLuceneSPI();
    }
    shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
    updateShardHandler = new UpdateShardHandler(cfg);
    solrCores.allocateLazyCores(cfg.getTransientCacheSize(), loader);
    logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
    hostName = cfg.getHost();
    log.info("Host Name: " + hostName);
    zkSys.initZooKeeper(this, solrHome, cfg);
    collectionsHandler = createHandler(cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
    infoHandler = createHandler(cfg.getInfoHandlerClass(), InfoHandler.class);
    coreAdminHandler = createHandler(cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
    coreConfigService = cfg.createCoreConfigService(loader, zkSys.getZkController());
    containerProperties = cfg.getSolrProperties("solr");
    // setup executor to load cores in parallel
    // do not limit the size of the executor in zk mode since cores may try and wait for each other.
    ExecutorService coreLoadExecutor = Executors.newFixedThreadPool(
        (zkSys.getZkController() == null ? cfg.getCoreLoadThreadCount() : Integer.MAX_VALUE),
        new DefaultSolrThreadFactory("coreLoadExecutor"));
    // OpenSextant
    List<Future<SolrCore>> startupResults = Collections.emptyList();
    try {
        List<CoreDescriptor> cds = coresLocator.discover(this);
        checkForDuplicateCoreNames(cds);
        List<Callable<SolrCore>> creators = new ArrayList<>();
        for (final CoreDescriptor cd : cds) {
            if (cd.isTransient() || !cd.isLoadOnStartup()) {
                solrCores.putDynamicDescriptor(cd.getName(), cd);
            }
            if (cd.isLoadOnStartup()) {
                creators.add(new Callable<SolrCore>() {
                    @Override
                    public SolrCore call() throws Exception {
                        if (zkSys.getZkController() != null) {
                            zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
                        }
                        return create(cd, false);
                    }
                });
            }
        }
        try {
            // changed by OpenSextant
            startupResults = coreLoadExecutor.invokeAll(creators);
        } catch (InterruptedException e) {
            throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Interrupted while loading cores");
        }
        // Start the background thread
        backgroundCloser = new CloserThread(this, solrCores, cfg);
        backgroundCloser.start();
    } finally {
        ExecutorUtil.shutdownNowAndAwaitTermination(coreLoadExecutor);
        // OpenSextant custom
        for (Future<SolrCore> core : startupResults) {
            try {
                core.get();
                log.info("Successfully loaded a core.");
            } catch (InterruptedException e) {
                // ignore, we've been cancelled
            } catch (ExecutionException e) {
                log.error("Error starting solr core.", e);
            }
        }
        // OpenSextant custom
    }
    if (isZooKeeperAware()) {
        // register in zk in background threads
        Collection<SolrCore> cores = getCores();
        if (cores != null) {
            for (SolrCore core : cores) {
                try {
                    zkSys.registerInZk(core, true);
                } catch (Throwable t) {
                    SolrException.log(log, "Error registering SolrCore", t);
                }
            }
        }
        zkSys.getZkController().checkOverseerDesignate();
    }
}
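Stripped of Solr's internals, the pattern above is: build a fixed-size pool whose threads are named by DefaultSolrThreadFactory, hand a list of Callables to invokeAll, shut the pool down, and only then inspect the Futures so that one failed task does not hide the others. Below is a minimal, self-contained sketch of that pattern, assuming only the JDK executor API and the string-prefix DefaultSolrThreadFactory constructor shown above; the class name ParallelLoadSketch and the placeholder tasks are invented for illustration and do not come from either project.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.apache.solr.util.DefaultSolrThreadFactory;

public class ParallelLoadSketch {

    public static void main(String[] args) throws InterruptedException {
        // The thread factory gives the pool's threads a recognizable name prefix,
        // which makes thread dumps and log lines easier to attribute.
        ExecutorService pool = Executors.newFixedThreadPool(4, new DefaultSolrThreadFactory("loader"));

        List<Callable<String>> tasks = new ArrayList<>();
        for (int i = 0; i < 8; i++) {
            final int id = i;
            tasks.add(() -> "loaded-" + id);   // placeholder for the real per-core work, e.g. create(cd, false)
        }

        List<Future<String>> results;
        try {
            // invokeAll blocks until every task has completed (or the calling thread is interrupted).
            results = pool.invokeAll(tasks);
        } finally {
            pool.shutdownNow();
        }

        // Inspect the futures afterwards so each failure surfaces individually.
        for (Future<String> f : results) {
            try {
                System.out.println(f.get());
            } catch (ExecutionException e) {
                System.err.println("task failed: " + e.getCause());
            }
        }
    }
}

Calling get() on each Future, as the OpenSextant-modified finally block does, turns a silent startup failure into a logged ExecutionException.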
use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
the class CdcrReplicationHandlerTest method testReplicationWithBufferedUpdates.
/**
 * Test the scenario where the slave is killed while the leader is still receiving updates.
 * The slave should buffer updates while in recovery, then replay them at the end of the recovery.
 * If updates were properly buffered and replayed, the slave should end up with the same number of
 * documents as the leader. This checks whether cdcr tlog replication interferes with buffered
 * updates - SOLR-8263.
 */
@Test
@ShardsFixed(num = 2)
public void testReplicationWithBufferedUpdates() throws Exception {
    List<CloudJettyRunner> slaves = this.getShardToSlaveJetty(SOURCE_COLLECTION, SHARD1);
    AtomicInteger numDocs = new AtomicInteger(0);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(new DefaultSolrThreadFactory("cdcr-test-update-scheduler"));
    executor.scheduleWithFixedDelay(new UpdateThread(numDocs), 10, 10, TimeUnit.MILLISECONDS);
    // Restart the slave node to trigger Replication strategy
    this.restartServer(slaves.get(0));
    // shutdown the update thread and wait for its completion
    executor.shutdown();
    executor.awaitTermination(500, TimeUnit.MILLISECONDS);
    // check that we have the expected number of documents in the cluster
    assertNumDocs(numDocs.get(), SOURCE_COLLECTION);
    // check that we have the expected number of documents on the slave
    assertNumDocs(numDocs.get(), slaves.get(0));
}
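The test's moving parts are a single-threaded scheduler that keeps issuing updates at a fixed delay, and a clean stop via shutdown() followed by awaitTermination(). A stripped-down sketch of that scheduling and shutdown sequence is shown below, assuming only the JDK scheduler API; the test's UpdateThread is replaced by a hypothetical task that merely increments a counter, and the sleep stands in for the slave restart under test.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.solr.util.DefaultSolrThreadFactory;

public class BackgroundUpdaterSketch {

    public static void main(String[] args) throws InterruptedException {
        AtomicInteger numDocs = new AtomicInteger(0);
        ScheduledExecutorService executor =
                Executors.newSingleThreadScheduledExecutor(new DefaultSolrThreadFactory("sketch-update-scheduler"));

        // Placeholder for the test's UpdateThread: pretend each run indexes one document.
        // With scheduleWithFixedDelay, each run starts 10 ms after the previous one finishes.
        executor.scheduleWithFixedDelay(() -> numDocs.incrementAndGet(), 10, 10, TimeUnit.MILLISECONDS);

        // ... the operation under test (restarting a node, etc.) would happen here ...
        Thread.sleep(200);

        // Stop producing updates, then wait briefly for any in-flight run to finish.
        executor.shutdown();
        executor.awaitTermination(500, TimeUnit.MILLISECONDS);
        System.out.println("updates issued: " + numDocs.get());
    }
}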
use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
the class LeaderElectionTest method testStressElection.
@Test
public void testStressElection() throws Exception {
    final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(15, new DefaultSolrThreadFactory("stressElection"));
    final List<ClientThread> threads = Collections.synchronizedList(new ArrayList<ClientThread>());
    // start with a leader
    ClientThread thread1 = new ClientThread("shard1", 0);
    threads.add(thread1);
    scheduler.schedule(thread1, 0, TimeUnit.MILLISECONDS);
    Thread.sleep(2000);
    Thread scheduleThread = new Thread() {
        @Override
        public void run() {
            int count = atLeast(5);
            for (int i = 1; i < count; i++) {
                int launchIn = random().nextInt(500);
                ClientThread thread = null;
                try {
                    thread = new ClientThread("shard1", i);
                } catch (Exception e) {
                    // ignore - just skip this candidate
                }
                if (thread != null) {
                    threads.add(thread);
                    scheduler.schedule(thread, launchIn, TimeUnit.MILLISECONDS);
                }
            }
        }
    };
    Thread killThread = new Thread() {
        @Override
        public void run() {
            while (!stopStress) {
                try {
                    int j;
                    try {
                        // never pick the most recently added threads, so at least one candidate survives
                        j = random().nextInt(threads.size() - 2);
                    } catch (IllegalArgumentException e) {
                        // not enough threads yet
                        continue;
                    }
                    try {
                        threads.get(j).close();
                    } catch (Exception e) {
                        // ignore
                    }
                    Thread.sleep(10);
                } catch (Exception e) {
                    // ignore
                }
            }
        }
    };
    Thread connLossThread = new Thread() {
        @Override
        public void run() {
            while (!stopStress) {
                try {
                    Thread.sleep(50);
                    int j;
                    j = random().nextInt(threads.size());
                    try {
                        threads.get(j).es.zkClient.getSolrZooKeeper().closeCnxn();
                        if (random().nextBoolean()) {
                            long sessionId = zkClient.getSolrZooKeeper().getSessionId();
                            server.expire(sessionId);
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                    Thread.sleep(500);
                } catch (Exception e) {
                    // ignore
                }
            }
        }
    };
    scheduleThread.start();
    connLossThread.start();
    killThread.start();
    Thread.sleep(4000);
    stopStress = true;
    scheduleThread.interrupt();
    connLossThread.interrupt();
    killThread.interrupt();
    scheduleThread.join();
    scheduler.shutdownNow();
    connLossThread.join();
    killThread.join();
    int seq = threads.get(getLeaderThread()).getSeq();
    // cleanup any threads still running
    for (ClientThread thread : threads) {
        thread.es.zkClient.getSolrZooKeeper().close();
        thread.close();
    }
    for (Thread thread : threads) {
        thread.join();
    }
}
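Much of the code above is shutdown choreography: a volatile stop flag, interrupt() and join() on the helper threads, and shutdownNow() on the named scheduler pool. The sketch below isolates that choreography under the assumption that the disruptive work can be reduced to a placeholder loop; StressHarnessSketch, the chaos thread, and the printed messages are invented names, not code from LeaderElectionTest or ZooKeeper.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

import org.apache.solr.util.DefaultSolrThreadFactory;

public class StressHarnessSketch {

    // Flag checked by the chaos thread; mirrors stopStress in the test above.
    private static volatile boolean stop = false;

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler =
                Executors.newScheduledThreadPool(4, new DefaultSolrThreadFactory("stressSketch"));

        // Schedule workers at random points in the near future, like scheduleThread does.
        for (int i = 0; i < 10; i++) {
            long launchIn = ThreadLocalRandom.current().nextInt(500);
            final int id = i;
            scheduler.schedule(() -> System.out.println("worker " + id + " ran"), launchIn, TimeUnit.MILLISECONDS);
        }

        // A disruptor thread that keeps running until told to stop.
        Thread chaos = new Thread(() -> {
            while (!stop) {
                try {
                    Thread.sleep(50);   // placeholder for killing a client or expiring a session
                } catch (InterruptedException e) {
                    return;             // interrupted during shutdown
                }
            }
        });
        chaos.start();

        Thread.sleep(1000);

        // Same shutdown order as the test: set the flag, interrupt, join, then stop the scheduler.
        stop = true;
        chaos.interrupt();
        chaos.join();
        scheduler.shutdownNow();
    }
}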
use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
the class TestDocBasedVersionConstraints method testConcurrentAdds.
/**
 * Constantly hammer the same doc with multiple concurrent threads and different versions,
 * and confirm that the highest version wins.
 */
public void testConcurrentAdds() throws Exception {
    final int NUM_DOCS = atLeast(50);
    final int MAX_CONCURENT = atLeast(10);
    ExecutorService runner = ExecutorUtil.newMDCAwareFixedThreadPool(MAX_CONCURENT, new DefaultSolrThreadFactory("TestDocBasedVersionConstraints"));
    // runner = Executors.newFixedThreadPool(1); // to test single threaded
    try {
        for (int id = 0; id < NUM_DOCS; id++) {
            final int numAdds = TestUtil.nextInt(random(), 3, MAX_CONCURENT);
            final int winner = TestUtil.nextInt(random(), 0, numAdds - 1);
            final int winnerVersion = atLeast(100);
            final boolean winnerIsDeleted = (0 == TestUtil.nextInt(random(), 0, 4));
            List<Callable<Object>> tasks = new ArrayList<>(numAdds);
            for (int variant = 0; variant < numAdds; variant++) {
                final boolean iShouldWin = (variant == winner);
                final long version = (iShouldWin ? winnerVersion : TestUtil.nextInt(random(), 1, winnerVersion - 1));
                if ((iShouldWin && winnerIsDeleted) || (!iShouldWin && 0 == TestUtil.nextInt(random(), 0, 4))) {
                    tasks.add(delayedDelete("" + id, "" + version));
                } else {
                    tasks.add(delayedAdd("id", "" + id, "name", "name" + id + "_" + variant, "my_version_l", "" + version));
                }
            }
            runner.invokeAll(tasks);
            final String expectedDoc = "{'id':'" + id + "','my_version_l':" + winnerVersion + (!winnerIsDeleted ? ",'name':'name" + id + "_" + winner + "'}" : "}");
            assertJQ(req("qt", "/get", "id", "" + id, "fl", "id,name,my_version_l"), "=={'doc':" + expectedDoc + "}");
            assertU(commit());
            assertJQ(req("q", "id:" + id, "fl", "id,name,my_version_l"), "/response/numFound==1", "/response/docs==[" + expectedDoc + "]");
        }
    } finally {
        ExecutorUtil.shutdownAndAwaitTermination(runner);
    }
}
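The "highest version wins" property can be exercised without a Solr core: race a batch of Callables through invokeAll and assert that only the largest submitted version is left standing. The sketch below is an assumption-laden stand-in that replaces Solr's document-based version constraint with an AtomicLong accumulated via Math.max, so it demonstrates the test's concurrency pattern rather than the update processor itself; HighestVersionWinsSketch and its names are invented for illustration.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.solr.util.DefaultSolrThreadFactory;

public class HighestVersionWinsSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService runner = Executors.newFixedThreadPool(8, new DefaultSolrThreadFactory("versionRace"));
        try {
            AtomicLong storedVersion = new AtomicLong(0);   // stands in for the doc's my_version_l field
            List<Callable<Object>> tasks = new ArrayList<>();
            List<Long> versions = new ArrayList<>();
            for (int i = 0; i < 20; i++) {
                final long version = ThreadLocalRandom.current().nextLong(1, 1000);
                versions.add(version);
                // Each task only "wins" if its version is higher than what is already stored.
                tasks.add(() -> storedVersion.accumulateAndGet(version, Math::max));
            }

            // Run all competing updates concurrently and wait for them to finish.
            runner.invokeAll(tasks);

            long expected = Collections.max(versions);
            if (storedVersion.get() != expected) {
                throw new AssertionError("expected " + expected + " but was " + storedVersion.get());
            }
            System.out.println("highest version won: " + storedVersion.get());
        } finally {
            runner.shutdown();
        }
    }
}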
use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
the class TestInPlaceUpdatesDistrib method reorderedDBQsResurrectionTest.
/* Test for a situation where a document requiring an in-place update cannot be "resurrected"
 * because the original fully indexed document has been deleted by an out-of-order DBQ.
 * Expected behaviour in this case is to throw the replica into LIR (since this will
 * be rare). Here's an example of the situation:
 *     ADD(id=x, val=5,  ver=1)
 *     UPD(id=x, val=10, ver=2)
 *     DBQ(q=val:10, ver=4)
 *     DV(id=x, val=5,  ver=3)
 */
private void reorderedDBQsResurrectionTest() throws Exception {
    if (onlyLeaderIndexes) {
        log.info("RTG with DBQs are not working in tlog replicas");
        return;
    }
    clearIndex();
    commit();
    buildRandomIndex(0);
    // RTG straight from the index
    SolrDocument sdoc = LEADER.getById("0");
    //assertEquals(value, sdoc.get("inplace_updatable_float"));
    assertEquals("title0", sdoc.get("title_s"));
    long version0 = (long) sdoc.get("_version_");
    String field = "inplace_updatable_int";
    // put replica out of sync
    List<UpdateRequest> updates = new ArrayList<>();
    // full update
    updates.add(simulatedUpdateRequest(null, "id", 0, "title_s", "title0_new", field, 5, "_version_", version0 + 1));
    // in-place update: inplace_updatable_int=10
    updates.add(simulatedUpdateRequest(version0 + 1, "id", 0, field, 10, "_version_", version0 + 2));
    // in-place update: inplace_updatable_int=5
    updates.add(simulatedUpdateRequest(version0 + 2, "id", 0, field, 5, "_version_", version0 + 3));
    // supposed to not delete anything
    updates.add(simulatedDeleteRequest(field + ":10", version0 + 4));
    // order the updates correctly for NONLEADER 1
    for (UpdateRequest update : updates) {
        log.info("Issuing well ordered update: " + update.getDocuments());
        NONLEADERS.get(1).request(update);
    }
    // Reordering needs to happen using parallel threads
    ExecutorService threadpool = ExecutorUtil.newMDCAwareFixedThreadPool(updates.size() + 1, new DefaultSolrThreadFactory(getTestName()));
    // re-order the last two updates for NONLEADER 0
    List<UpdateRequest> reorderedUpdates = new ArrayList<>(updates);
    Collections.swap(reorderedUpdates, 2, 3);
    List<Future<UpdateResponse>> updateResponses = new ArrayList<>();
    for (UpdateRequest update : reorderedUpdates) {
        // pretend this update is coming from the other non-leader, so that
        // the resurrection can happen from there (instead of the leader)
        update.setParam(DistributedUpdateProcessor.DISTRIB_FROM, ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());
        AsyncUpdateWithRandomCommit task = new AsyncUpdateWithRandomCommit(update, NONLEADERS.get(0), random().nextLong());
        updateResponses.add(threadpool.submit(task));
        // we can't guarantee what order the updates are executed in, since multiple threads are
        // involved, but the sleep biases the thread scheduling towards the order submitted
        Thread.sleep(10);
    }
    threadpool.shutdown();
    assertTrue("Thread pool didn't terminate within 15 secs", threadpool.awaitTermination(15, TimeUnit.SECONDS));
    int successful = 0;
    for (Future<UpdateResponse> resp : updateResponses) {
        try {
            UpdateResponse r = resp.get();
            if (r.getStatus() == 0) {
                successful++;
            }
        } catch (Exception ex) {
            if (!ex.getMessage().contains("Tried to fetch missing update" + " from the leader, but missing wasn't present at leader.")) {
                throw ex;
            }
        }
    }
    // All should succeed, i.e. no LIR
    assertEquals(updateResponses.size(), successful);
    log.info("Non leader 0: " + ((HttpSolrClient) NONLEADERS.get(0)).getBaseURL());
    log.info("Non leader 1: " + ((HttpSolrClient) NONLEADERS.get(1)).getBaseURL());
    SolrDocument doc0 = NONLEADERS.get(0).getById(String.valueOf(0), params("distrib", "false"));
    SolrDocument doc1 = NONLEADERS.get(1).getById(String.valueOf(0), params("distrib", "false"));
    log.info("Doc in both replica 0: " + doc0);
    log.info("Doc in both replica 1: " + doc1);
    // assert both replicas have the same effect
    for (int i = 0; i < NONLEADERS.size(); i++) {
        // 0th is re-ordered replica, 1st is well-ordered replica
        SolrClient client = NONLEADERS.get(i);
        SolrDocument doc = client.getById(String.valueOf(0), params("distrib", "false"));
        assertNotNull("Client: " + ((HttpSolrClient) client).getBaseURL(), doc);
        assertEquals("Client: " + ((HttpSolrClient) client).getBaseURL(), 5, doc.getFieldValue(field));
    }
    log.info("reorderedDBQsResurrectionTest: This test passed fine...");
    clearIndex();
    commit();
}
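The distinctive technique here is submitting the reordered updates one at a time with a short sleep to bias, but not guarantee, their execution order, then tallying successes from the returned Futures after shutdown() and awaitTermination(). The following self-contained sketch keeps only that submission pattern; the placeholder Callable that returns 0 stands in for AsyncUpdateWithRandomCommit and UpdateResponse.getStatus(), and BiasedOrderSubmitSketch is an invented name.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.solr.util.DefaultSolrThreadFactory;

public class BiasedOrderSubmitSketch {

    public static void main(String[] args) throws Exception {
        ExecutorService threadpool = Executors.newFixedThreadPool(4, new DefaultSolrThreadFactory("reorderSketch"));
        List<Future<Integer>> responses = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int id = i;
            Callable<Integer> task = () -> {
                // placeholder for sending one update request to a replica
                System.out.println("processing update " + id + " on " + Thread.currentThread().getName());
                return 0;   // 0 = success, mirroring UpdateResponse.getStatus()
            };
            responses.add(threadpool.submit(task));
            // Bias (but do not guarantee) execution in submission order.
            Thread.sleep(10);
        }
        threadpool.shutdown();
        if (!threadpool.awaitTermination(15, TimeUnit.SECONDS)) {
            throw new IllegalStateException("Thread pool didn't terminate within 15 secs");
        }
        int successful = 0;
        for (Future<Integer> resp : responses) {
            if (resp.get() == 0) {
                successful++;
            }
        }
        System.out.println(successful + "/" + responses.size() + " updates succeeded");
    }
}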