Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
Class LTRThreadModule, method init:
@Override
public void init(NamedList args) {
  if (args != null) {
    SolrPluginUtils.invokeSetters(this, args);
  }
  validate();
  if (this.totalPoolThreads > 1) {
    ltrSemaphore = new Semaphore(totalPoolThreads);
  } else {
    ltrSemaphore = null;
  }
  createWeightScoreExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(
      0, maxPoolSize,
      keepAliveTimeSeconds, TimeUnit.SECONDS, // terminate idle threads after 10 sec
      new SynchronousQueue<Runnable>(),       // directly hand off tasks
      new DefaultSolrThreadFactory(threadNamePrefix));
}
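
For context, here is a minimal, self-contained sketch (plain JDK, not Solr code) of the same direct-handoff pattern: with a SynchronousQueue there is no task backlog, so a semaphore sized to the pool (like ltrSemaphore above) is what keeps submissions from outrunning the available threads. The maxPoolSize and 10-second keep-alive below mirror the snippet's parameters but are illustrative values only.

import java.util.concurrent.*;

// Hypothetical sketch of the direct-handoff pool + semaphore pattern; not Solr code.
public class DirectHandoffDemo {
  public static void main(String[] args) throws InterruptedException {
    int maxPoolSize = 4;                            // stands in for LTRThreadModule's maxPoolSize
    Semaphore gate = new Semaphore(maxPoolSize);    // stands in for ltrSemaphore
    ExecutorService pool = new ThreadPoolExecutor(
        0, maxPoolSize, 10, TimeUnit.SECONDS,       // idle threads terminate after 10 sec
        new SynchronousQueue<Runnable>());          // tasks are handed straight to a thread

    for (int i = 0; i < 10; i++) {
      final int task = i;
      gate.acquire();                               // block until a worker slot is free
      pool.submit(() -> {
        try {
          System.out.println("scoring task " + task + " on " + Thread.currentThread().getName());
        } finally {
          gate.release();                           // free the slot for the next submission
        }
      });
    }
    pool.shutdown();
    pool.awaitTermination(30, TimeUnit.SECONDS);
  }
}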
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
Class CoreContainer, method load (the method that starts the CloserThread background closer):
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
 * Load the cores defined for this CoreContainer
 */
public void load() {
  log.debug("Loading cores into CoreContainer [instanceDir={}]", loader.getInstancePath());

  // add the sharedLib to the shared resource loader before initializing cfg based plugins
  String libDir = cfg.getSharedLibDirectory();
  if (libDir != null) {
    Path libPath = loader.getInstancePath().resolve(libDir);
    try {
      loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
      loader.reloadLuceneSPI();
    } catch (IOException e) {
      if (!libDir.equals("lib")) {
        // Don't complain if default "lib" dir does not exist
        log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
      }
    }
  }

  metricManager = new SolrMetricManager(loader, cfg.getMetricsConfig());

  coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(
      coreContainerWorkExecutor, null,
      metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
      SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));

  shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
  if (shardHandlerFactory instanceof SolrMetricProducer) {
    SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory;
    metricProducer.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "httpShardHandler");
  }

  updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());
  updateShardHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "updateShardHandler");

  transientCoreCache = TransientSolrCoreCacheFactory.newInstance(loader, this);

  logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);

  hostName = cfg.getNodeName();

  zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
  if (isZooKeeperAware())
    pkiAuthenticationPlugin = new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName());

  MDCLoggingContext.setNode(this);

  securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
  reloadSecurityProperties();
  this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());

  createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
  collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
  infoHandler = createHandler(INFO_HANDLER_PATH, cfg.getInfoHandlerClass(), InfoHandler.class);
  coreAdminHandler = createHandler(CORES_HANDLER_PATH, cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
  configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
  metricsHandler = createHandler(METRICS_PATH, MetricsHandler.class.getName(), MetricsHandler.class);
  metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
  // may want to add some configuration here in the future
  metricsCollectorHandler.init(null);

  containerHandlers.put(AUTHZ_PATH, securityConfHandler);
  securityConfHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), AUTHZ_PATH);
  containerHandlers.put(AUTHC_PATH, securityConfHandler);
  if (pkiAuthenticationPlugin != null)
    containerHandlers.put(PKIAuthenticationPlugin.PATH, pkiAuthenticationPlugin.getRequestHandler());

  PluginInfo[] metricReporters = cfg.getMetricsConfig().getMetricReporters();
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.node);
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.jvm);
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.jetty);

  coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);

  containerProperties.putAll(cfg.getSolrProperties());

  // initialize gauges for reporting the number of cores and disk total/free
  String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
  metricManager.registerGauge(null, registryName, () -> solrCores.getCores().size(),
      true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(),
      true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(),
      true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getTotalSpace(),
      true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
  metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getUsableSpace(),
      true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
  // add version information
  metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getSpecificationVersion(),
      true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version");
  metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getImplementationVersion(),
      true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version");

  SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean();
  fieldCacheBean.initializeMetrics(metricManager, registryName, null);

  if (isZooKeeperAware()) {
    metricManager.loadClusterReporters(metricReporters, this);
  }

  // setup executor to load cores in parallel
  ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
      ExecutorUtil.newMDCAwareFixedThreadPool(
          cfg.getCoreLoadThreadCount(isZooKeeperAware()),
          new DefaultSolrThreadFactory("coreLoadExecutor")),
      null,
      metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
      SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));

  final List<Future<SolrCore>> futures = new ArrayList<>();
  try {
    List<CoreDescriptor> cds = coresLocator.discover(this);
    if (isZooKeeperAware()) {
      // sort the cores if it is in SolrCloud. In standalone node the order does not matter
      CoreSorter coreComparator = new CoreSorter().init(this);
      // make a copy
      cds = new ArrayList<>(cds);
      Collections.sort(cds, coreComparator::compare);
    }
    checkForDuplicateCoreNames(cds);
    status |= CORE_DISCOVERY_COMPLETE;

    for (final CoreDescriptor cd : cds) {
      if (cd.isTransient() || !cd.isLoadOnStartup()) {
        getTransientCacheHandler().addTransientDescriptor(cd.getName(), cd);
      } else if (asyncSolrCoreLoad) {
        solrCores.markCoreAsLoading(cd);
      }
      if (cd.isLoadOnStartup()) {
        futures.add(coreLoadExecutor.submit(() -> {
          SolrCore core;
          try {
            if (zkSys.getZkController() != null) {
              zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
            }
            core = create(cd, false, false);
          } finally {
            if (asyncSolrCoreLoad) {
              solrCores.markCoreAsNotLoading(cd);
            }
          }
          try {
            zkSys.registerInZk(core, true, false);
          } catch (RuntimeException e) {
            SolrException.log(log, "Error registering SolrCore", e);
          }
          return core;
        }));
      }
    }

    // Start the background thread
    backgroundCloser = new CloserThread(this, solrCores, cfg);
    backgroundCloser.start();
  } finally {
    if (asyncSolrCoreLoad && futures != null) {
      coreContainerWorkExecutor.submit((Runnable) () -> {
        try {
          for (Future<SolrCore> future : futures) {
            try {
              future.get();
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
              log.error("Error waiting for SolrCore to be created", e);
            }
          }
        } finally {
          ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
        }
      });
    } else {
      ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
    }
  }

  if (isZooKeeperAware()) {
    zkSys.getZkController().checkOverseerDesignate();
  }

  // This is a bit redundant but these are two distinct concepts for all they're accomplished at the same time.
  status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
}
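
Stripped of the metrics and ZooKeeper wiring, the core-loading part of load() follows a common pattern: fan the loads out to a named fixed pool, keep the futures, and have a background task wait on them and then retire the pool. A hypothetical, plain-JDK sketch of just that pattern (not Solr code):

import java.util.*;
import java.util.concurrent.*;

// Hypothetical sketch of the parallel-load-and-await pattern; not Solr code.
public class ParallelLoadDemo {
  public static void main(String[] args) {
    ExecutorService loadPool = Executors.newFixedThreadPool(4);     // stands in for coreLoadExecutor
    ExecutorService workPool = Executors.newSingleThreadExecutor(); // stands in for coreContainerWorkExecutor

    List<Future<String>> futures = new ArrayList<>();
    for (String name : Arrays.asList("core1", "core2", "core3")) {
      futures.add(loadPool.submit(() -> "loaded " + name));         // stands in for create(cd, ...)
    }

    // Wait for the loads in the background, then retire the load pool,
    // so startup is not blocked on slow cores.
    workPool.submit(() -> {
      try {
        for (Future<String> f : futures) {
          try {
            System.out.println(f.get());
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          } catch (ExecutionException e) {
            e.printStackTrace();
          }
        }
      } finally {
        loadPool.shutdown();
      }
    });
    workPool.shutdown();
  }
}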
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
Class ManagedIndexSchema, method waitForSchemaZkVersionAgreement:
/**
 * Block up to a specified maximum time until we see agreement on the schema
 * version in ZooKeeper across all replicas for a collection.
 */
public static void waitForSchemaZkVersionAgreement(String collection, String localCoreNodeName,
    int schemaZkVersion, ZkController zkController, int maxWaitSecs) {
  RTimer timer = new RTimer();

  // get a list of active replica cores to query for the schema zk version (skipping this core of course)
  List<GetZkSchemaVersionCallable> concurrentTasks = new ArrayList<>();
  for (String coreUrl : getActiveReplicaCoreUrls(zkController, collection, localCoreNodeName))
    concurrentTasks.add(new GetZkSchemaVersionCallable(coreUrl, schemaZkVersion));
  if (concurrentTasks.isEmpty())
    return; // nothing to wait for ...

  log.info("Waiting up to " + maxWaitSecs + " secs for " + concurrentTasks.size() +
      " replicas to apply schema update version " + schemaZkVersion + " for collection " + collection);

  // use an executor service to invoke schema zk version requests in parallel with a max wait time
  int poolSize = Math.min(concurrentTasks.size(), 10);
  ExecutorService parallelExecutor =
      ExecutorUtil.newMDCAwareFixedThreadPool(poolSize, new DefaultSolrThreadFactory("managedSchemaExecutor"));
  try {
    List<Future<Integer>> results =
        parallelExecutor.invokeAll(concurrentTasks, maxWaitSecs, TimeUnit.SECONDS);

    // determine whether all replicas have the update
    List<String> failedList = null; // lazily init'd
    for (int f = 0; f < results.size(); f++) {
      int vers = -1;
      Future<Integer> next = results.get(f);
      if (next.isDone() && !next.isCancelled()) {
        // looks to have finished, but need to check the version value too
        try {
          vers = next.get();
        } catch (ExecutionException e) {
          // shouldn't happen since we checked isCancelled
        }
      }

      if (vers == -1) {
        String coreUrl = concurrentTasks.get(f).coreUrl;
        log.warn("Core " + coreUrl + " version mismatch! Expected " + schemaZkVersion + " but got " + vers);
        if (failedList == null)
          failedList = new ArrayList<>();
        failedList.add(coreUrl);
      }
    }

    // if any tasks haven't completed within the specified timeout, it's an error
    if (failedList != null)
      throw new SolrException(ErrorCode.SERVER_ERROR,
          failedList.size() + " out of " + (concurrentTasks.size() + 1) +
          " replicas failed to update their schema to version " + schemaZkVersion +
          " within " + maxWaitSecs + " seconds! Failed cores: " + failedList);
  } catch (InterruptedException ie) {
    log.warn("Core " + localCoreNodeName + " was interrupted waiting for schema version " + schemaZkVersion +
        " to propagate to " + concurrentTasks.size() + " replicas for collection " + collection);
    Thread.currentThread().interrupt();
  } finally {
    if (!parallelExecutor.isShutdown())
      parallelExecutor.shutdown();
  }

  log.info("Took {}ms for {} replicas to apply schema update version {} for collection {}",
      timer.getTime(), concurrentTasks.size(), schemaZkVersion, collection);
}
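
The JDK contract the method leans on is that invokeAll(tasks, timeout, unit) returns one Future per task and cancels any task that has not finished when the timeout expires, which is why a cancelled or exception-throwing future is counted as a replica that failed to reach the expected schema version. A minimal, self-contained illustration of that contract (plain JDK, not Solr code):

import java.util.*;
import java.util.concurrent.*;

// Hypothetical sketch of the invokeAll-with-timeout contract used above; not Solr code.
public class InvokeAllTimeoutDemo {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    List<Callable<Integer>> tasks = Arrays.asList(
        () -> 3,                                    // fast "replica" already at the expected version
        () -> { Thread.sleep(5000); return 3; });   // slow "replica" that will miss the deadline

    List<Future<Integer>> results = pool.invokeAll(tasks, 1, TimeUnit.SECONDS);
    for (Future<Integer> f : results) {
      int vers = -1;
      if (f.isDone() && !f.isCancelled()) {
        try { vers = f.get(); } catch (ExecutionException ignored) {}
      }
      System.out.println(vers == -1 ? "failed/timed out" : "reached version " + vers);
    }
    pool.shutdown();
  }
}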
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
Class DocExpirationUpdateProcessorFactory, method initDeleteExpiredDocsScheduler:
private void initDeleteExpiredDocsScheduler(SolrCore core) {
  executor = new ScheduledThreadPoolExecutor(1,
      new DefaultSolrThreadFactory("autoExpireDocs"),
      new RejectedExecutionHandler() {
        public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
          log.warn("Skipping execution of '{}' using '{}'", r, e);
        }
      });

  core.addCloseHook(new CloseHook() {
    public void postClose(SolrCore core) {
      if (executor.isTerminating()) {
        log.info("Waiting for close of DocExpiration Executor");
        ExecutorUtil.shutdownAndAwaitTermination(executor);
      }
    }

    public void preClose(SolrCore core) {
      log.info("Triggering Graceful close of DocExpiration Executor");
      executor.shutdown();
    }
  });

  executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
  executor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);

  // we don't want this firing right away, since the core may not be ready
  final long initialDelay = deletePeriodSeconds;
  // TODO: should we make initialDelay configurable
  // TODO: should we make initialDelay some fraction of the period?
  executor.scheduleAtFixedRate(new DeleteExpiredDocsRunnable(this), deletePeriodSeconds,
      deletePeriodSeconds, TimeUnit.SECONDS);
}
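
The two shutdown-policy setters are what make the preClose() call to executor.shutdown() graceful: with both policies set to false, an in-flight run may finish but nothing further is scheduled. A small, self-contained illustration of that behavior (plain JDK, not Solr code):

import java.util.concurrent.*;

// Hypothetical sketch: periodic task whose shutdown behavior mirrors the snippet above; not Solr code.
public class PeriodicShutdownDemo {
  public static void main(String[] args) throws InterruptedException {
    ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
    executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    executor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);

    // Fires every second after an initial one-second delay, like the delete-expired-docs task.
    executor.scheduleAtFixedRate(
        () -> System.out.println("periodic run at " + System.currentTimeMillis()),
        1, 1, TimeUnit.SECONDS);

    Thread.sleep(3500);        // let it run a few times
    executor.shutdown();       // no further periodic runs are scheduled
    executor.awaitTermination(5, TimeUnit.SECONDS);
  }
}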
Use of org.apache.solr.util.DefaultSolrThreadFactory in project lucene-solr by apache.
Class ConnectionManagerTest, method testReconnectWhenZkDisappeared:
@Test
public void testReconnectWhenZkDisappeared() throws Exception {
  ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor(new DefaultSolrThreadFactory("connectionManagerTest"));

  // setup a SolrZkClient to do some getBaseUrlForNodeName testing
  String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());

    MockZkClientConnectionStrategy strat = new MockZkClientConnectionStrategy();
    SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT, strat, null);
    ConnectionManager cm = zkClient.getConnectionManager();
    try {
      assertFalse(cm.isLikelyExpired());
      assertTrue(cm.isConnected());

      // reconnect -- should no longer be likely expired
      cm.process(new WatchedEvent(EventType.None, KeeperState.Expired, ""));
      assertFalse(cm.isLikelyExpired());
      assertTrue(cm.isConnected());
      assertTrue(strat.isExceptionThrow());
    } finally {
      cm.close();
      zkClient.close();
      executor.shutdown();
    }
  } finally {
    server.shutdown();
  }
}
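
All of the snippets above use DefaultSolrThreadFactory for one purpose: giving pool threads a recognizable name prefix when building executors (often via ExecutorUtil's MDC-aware pools). As a rough, hypothetical sketch of what such a naming factory typically looks like (this is not the actual Solr implementation; see org.apache.solr.util.DefaultSolrThreadFactory for the real class):

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical naming ThreadFactory, sketched for illustration only.
public class NamedThreadFactory implements ThreadFactory {
  private final String prefix;
  private final AtomicInteger counter = new AtomicInteger(1);

  public NamedThreadFactory(String prefix) {
    this.prefix = prefix;
  }

  @Override
  public Thread newThread(Runnable r) {
    // e.g. "coreLoadExecutor-1", "managedSchemaExecutor-2", ...
    Thread t = new Thread(r, prefix + "-" + counter.getAndIncrement());
    t.setDaemon(false);
    return t;
  }
}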