use of org.apache.solr.update.UpdateShardHandler in project Xponents by OpenSextant.
the class CoreContainer method load (CoreContainer.java also defines CloserThread).
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
* Load the cores defined for this CoreContainer
*/
public void load() {
  log.info("Loading cores into CoreContainer [instanceDir={}]", loader.getInstanceDir());
  // add the sharedLib to the shared resource loader before initializing cfg based plugins
  String libDir = cfg.getSharedLibDirectory();
  if (libDir != null) {
    File f = FileUtils.resolvePath(new File(solrHome), libDir);
    log.info("loading shared library: " + f.getAbsolutePath());
    loader.addToClassLoader(libDir, null, false);
    loader.reloadLuceneSPI();
  }
  shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
  updateShardHandler = new UpdateShardHandler(cfg);
  solrCores.allocateLazyCores(cfg.getTransientCacheSize(), loader);
  logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
  hostName = cfg.getHost();
  log.info("Host Name: " + hostName);
  zkSys.initZooKeeper(this, solrHome, cfg);
  collectionsHandler = createHandler(cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
  infoHandler = createHandler(cfg.getInfoHandlerClass(), InfoHandler.class);
  coreAdminHandler = createHandler(cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
  coreConfigService = cfg.createCoreConfigService(loader, zkSys.getZkController());
  containerProperties = cfg.getSolrProperties("solr");
  // setup executor to load cores in parallel
  // do not limit the size of the executor in zk mode since cores may try and wait for each other.
  ExecutorService coreLoadExecutor = Executors.newFixedThreadPool(
      (zkSys.getZkController() == null ? cfg.getCoreLoadThreadCount() : Integer.MAX_VALUE),
      new DefaultSolrThreadFactory("coreLoadExecutor"));
  // OpenSextant
  List<Future<SolrCore>> startupResults = Collections.emptyList();
  try {
    List<CoreDescriptor> cds = coresLocator.discover(this);
    checkForDuplicateCoreNames(cds);
    List<Callable<SolrCore>> creators = new ArrayList<>();
    for (final CoreDescriptor cd : cds) {
      if (cd.isTransient() || !cd.isLoadOnStartup()) {
        solrCores.putDynamicDescriptor(cd.getName(), cd);
      }
      if (cd.isLoadOnStartup()) {
        creators.add(new Callable<SolrCore>() {
          @Override
          public SolrCore call() throws Exception {
            if (zkSys.getZkController() != null) {
              zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
            }
            return create(cd, false);
          }
        });
      }
    }
    try {
      // changed by OpenSextant
      startupResults = coreLoadExecutor.invokeAll(creators);
    } catch (InterruptedException e) {
      throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Interrupted while loading cores");
    }
    // Start the background thread
    backgroundCloser = new CloserThread(this, solrCores, cfg);
    backgroundCloser.start();
  } finally {
    ExecutorUtil.shutdownNowAndAwaitTermination(coreLoadExecutor);
    // OpenSextant custom
    for (Future<SolrCore> core : startupResults) {
      try {
        core.get();
        log.info("Successfully loaded a core.");
      } catch (InterruptedException e) {
        // ignore, we've been cancelled
      } catch (ExecutionException e) {
        log.error("Error starting solr core.", e);
      }
    }
    // OpenSextant custom
  }
  if (isZooKeeperAware()) {
    // register in zk in background threads
    Collection<SolrCore> cores = getCores();
    if (cores != null) {
      for (SolrCore core : cores) {
        try {
          zkSys.registerInZk(core, true);
        } catch (Throwable t) {
          SolrException.log(log, "Error registering SolrCore", t);
        }
      }
    }
    zkSys.getZkController().checkOverseerDesignate();
  }
}
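The pattern here is one shared UpdateShardHandler per container: created once during load() and used by every core for distributed updates. Below is a minimal sketch of that lifecycle in isolation. The MiniContainer class and its method names are hypothetical; only the UpdateShardHandler constructor and close() calls come from the snippets in this section, and UpdateShardHandlerConfig.DEFAULT is the constructor form used in the test snippets further down (the OpenSextant code above passes the whole config object instead).

import org.apache.solr.update.UpdateShardHandler;
import org.apache.solr.update.UpdateShardHandlerConfig;

// Minimal lifecycle sketch, assuming a hypothetical container class.
public class MiniContainer {

  private UpdateShardHandler updateShardHandler;

  public void load() {
    // One handler per container, shared by all cores for distributed updates.
    updateShardHandler = new UpdateShardHandler(UpdateShardHandlerConfig.DEFAULT);
  }

  public void shutdown() {
    if (updateShardHandler != null) {
      updateShardHandler.close(); // releases the handler's HTTP client and executors
    }
  }
}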
use of org.apache.solr.update.UpdateShardHandler in project lucene-solr by apache.
the class OverseerTest method testOverseerStatsReset.
@Test
public void testOverseerStatsReset() throws Exception {
  String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
  ZkTestServer server = new ZkTestServer(zkDir);
  ZkStateReader reader = null;
  MockZKController mockController = null;
  SolrZkClient zkClient = null;
  try {
    server.run();
    AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
    AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
    zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
    ZkController.createClusterZkNodes(zkClient);
    reader = new ZkStateReader(zkClient);
    reader.createClusterStateWatchersAndUpdate();
    mockController = new MockZKController(server.getZkAddress(), "node1");
    LeaderElector overseerElector = new LeaderElector(zkClient);
    if (overseers.size() > 0) {
      overseers.get(overseers.size() - 1).close();
      overseers.get(overseers.size() - 1).getZkStateReader().getZkClient().close();
    }
    UpdateShardHandler updateShardHandler = new UpdateShardHandler(UpdateShardHandlerConfig.DEFAULT);
    updateShardHandlers.add(updateShardHandler);
    HttpShardHandlerFactory httpShardHandlerFactory = new HttpShardHandlerFactory();
    httpShardHandlerFactorys.add(httpShardHandlerFactory);
    Overseer overseer = new Overseer(httpShardHandlerFactory.getShardHandler(), updateShardHandler, "/admin/cores",
        reader, null, new CloudConfig.CloudConfigBuilder("127.0.0.1", 8983, "").build());
    overseers.add(overseer);
    ElectionContext ec = new OverseerElectionContext(zkClient, overseer, server.getZkAddress().replaceAll("/", "_"));
    overseerElector.setup(ec);
    overseerElector.joinElection(ec, false);
    mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.ACTIVE, 1);
    assertNotNull(overseer.getStats());
    assertTrue((overseer.getStats().getSuccessCount(OverseerAction.STATE.toLower())) > 0);
    // shut it down
    overseer.close();
    ec.cancelElection();
    // start it again
    overseerElector.setup(ec);
    overseerElector.joinElection(ec, false);
    assertNotNull(overseer.getStats());
    assertEquals(0, (overseer.getStats().getSuccessCount(OverseerAction.STATE.toLower())));
  } finally {
    close(mockController);
    close(zkClient);
    close(reader);
    server.shutdown();
  }
}
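The finally block above calls overloaded close(...) helpers that OverseerTest defines elsewhere in the class. A rough sketch of what such null-safe helpers look like follows; the bodies and signatures are assumptions, not the test's exact code.

// Hedged sketch of null-safe close helpers like the ones the finally block relies on.
// OverseerTest defines its own versions; these bodies are assumptions.
private void close(MockZKController controller) throws Exception {
  if (controller != null) {
    controller.close();
  }
}

private void close(SolrZkClient client) throws Exception {
  if (client != null) {
    client.close();
  }
}

private void close(ZkStateReader reader) throws Exception {
  if (reader != null) {
    reader.close();
  }
}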
use of org.apache.solr.update.UpdateShardHandler in project lucene-solr by apache.
the class OverseerTest method tearDown.
@After
public void tearDown() throws Exception {
  super.tearDown();
  for (Overseer overseer : overseers) {
    overseer.close();
  }
  overseers.clear();
  for (ZkStateReader reader : readers) {
    reader.close();
  }
  readers.clear();
  for (HttpShardHandlerFactory handlerFactory : httpShardHandlerFactorys) {
    handlerFactory.close();
  }
  httpShardHandlerFactorys.clear();
  for (UpdateShardHandler updateShardHandler : updateShardHandlers) {
    updateShardHandler.close();
  }
  updateShardHandlers.clear();
}
use of org.apache.solr.update.UpdateShardHandler in project lucene-solr by apache.
the class CoreContainer method load (CoreContainer.java also defines CloserThread).
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
* Load the cores defined for this CoreContainer
*/
public void load() {
  log.debug("Loading cores into CoreContainer [instanceDir={}]", loader.getInstancePath());
  // add the sharedLib to the shared resource loader before initializing cfg based plugins
  String libDir = cfg.getSharedLibDirectory();
  if (libDir != null) {
    Path libPath = loader.getInstancePath().resolve(libDir);
    try {
      loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
      loader.reloadLuceneSPI();
    } catch (IOException e) {
      if (!libDir.equals("lib")) {
        // Don't complain if default "lib" dir does not exist
        log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
      }
    }
  }
  metricManager = new SolrMetricManager(loader, cfg.getMetricsConfig());
  coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(coreContainerWorkExecutor, null,
      metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
      SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
  shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
  if (shardHandlerFactory instanceof SolrMetricProducer) {
    SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory;
    metricProducer.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "httpShardHandler");
  }
  updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());
  updateShardHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "updateShardHandler");
  transientCoreCache = TransientSolrCoreCacheFactory.newInstance(loader, this);
  logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
  hostName = cfg.getNodeName();
  zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
  if (isZooKeeperAware())
    pkiAuthenticationPlugin = new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName());
  MDCLoggingContext.setNode(this);
  securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
  reloadSecurityProperties();
  this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());
  createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
  collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
  infoHandler = createHandler(INFO_HANDLER_PATH, cfg.getInfoHandlerClass(), InfoHandler.class);
  coreAdminHandler = createHandler(CORES_HANDLER_PATH, cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
  configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
  metricsHandler = createHandler(METRICS_PATH, MetricsHandler.class.getName(), MetricsHandler.class);
  metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
  // may want to add some configuration here in the future
  metricsCollectorHandler.init(null);
  containerHandlers.put(AUTHZ_PATH, securityConfHandler);
  securityConfHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), AUTHZ_PATH);
  containerHandlers.put(AUTHC_PATH, securityConfHandler);
  if (pkiAuthenticationPlugin != null)
    containerHandlers.put(PKIAuthenticationPlugin.PATH, pkiAuthenticationPlugin.getRequestHandler());
  PluginInfo[] metricReporters = cfg.getMetricsConfig().getMetricReporters();
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.node);
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.jvm);
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.jetty);
  coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);
  containerProperties.putAll(cfg.getSolrProperties());
  // initialize gauges for reporting the number of cores and disk total/free
  String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
  metricManager.registerGauge(null, registryName, () -> solrCores.getCores().size(), true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(), true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(), true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getTotalSpace(), true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
  metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getUsableSpace(), true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
  // add version information
  metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getSpecificationVersion(), true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version");
  metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getImplementationVersion(), true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version");
  SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean();
  fieldCacheBean.initializeMetrics(metricManager, registryName, null);
  if (isZooKeeperAware()) {
    metricManager.loadClusterReporters(metricReporters, this);
  }
  // setup executor to load cores in parallel
  ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
      ExecutorUtil.newMDCAwareFixedThreadPool(cfg.getCoreLoadThreadCount(isZooKeeperAware()), new DefaultSolrThreadFactory("coreLoadExecutor")),
      null,
      metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
      SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
  final List<Future<SolrCore>> futures = new ArrayList<>();
  try {
    List<CoreDescriptor> cds = coresLocator.discover(this);
    if (isZooKeeperAware()) {
      // sort the cores if it is in SolrCloud. In standalone node the order does not matter
      CoreSorter coreComparator = new CoreSorter().init(this);
      // make a copy
      cds = new ArrayList<>(cds);
      Collections.sort(cds, coreComparator::compare);
    }
    checkForDuplicateCoreNames(cds);
    status |= CORE_DISCOVERY_COMPLETE;
    for (final CoreDescriptor cd : cds) {
      if (cd.isTransient() || !cd.isLoadOnStartup()) {
        getTransientCacheHandler().addTransientDescriptor(cd.getName(), cd);
      } else if (asyncSolrCoreLoad) {
        solrCores.markCoreAsLoading(cd);
      }
      if (cd.isLoadOnStartup()) {
        futures.add(coreLoadExecutor.submit(() -> {
          SolrCore core;
          try {
            if (zkSys.getZkController() != null) {
              zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
            }
            core = create(cd, false, false);
          } finally {
            if (asyncSolrCoreLoad) {
              solrCores.markCoreAsNotLoading(cd);
            }
          }
          try {
            zkSys.registerInZk(core, true, false);
          } catch (RuntimeException e) {
            SolrException.log(log, "Error registering SolrCore", e);
          }
          return core;
        }));
      }
    }
    // Start the background thread
    backgroundCloser = new CloserThread(this, solrCores, cfg);
    backgroundCloser.start();
  } finally {
    if (asyncSolrCoreLoad && futures != null) {
      coreContainerWorkExecutor.submit((Runnable) () -> {
        try {
          for (Future<SolrCore> future : futures) {
            try {
              future.get();
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
              log.error("Error waiting for SolrCore to be created", e);
            }
          }
        } finally {
          ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
        }
      });
    } else {
      ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
    }
  }
  if (isZooKeeperAware()) {
    zkSys.getZkController().checkOverseerDesignate();
  }
  // This is a bit redundant but these are two distinct concepts for all they're accomplished at the same time.
  status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
}
use of org.apache.solr.update.UpdateShardHandler in project lucene-solr by apache.
the class ChaosMonkeyShardSplitTest method electNewOverseer.
/**
* Elects a new overseer
*
* @return SolrZkClient
*/
private SolrZkClient electNewOverseer(String address) throws KeeperException, InterruptedException, IOException {
  SolrZkClient zkClient = new SolrZkClient(address, TIMEOUT);
  ZkStateReader reader = new ZkStateReader(zkClient);
  LeaderElector overseerElector = new LeaderElector(zkClient);
  UpdateShardHandler updateShardHandler = new UpdateShardHandler(UpdateShardHandlerConfig.DEFAULT);
  // TODO: close Overseer
  Overseer overseer = new Overseer(new HttpShardHandlerFactory().getShardHandler(), updateShardHandler, "/admin/cores",
      reader, null, new CloudConfig.CloudConfigBuilder("127.0.0.1", 8983, "solr").build());
  overseer.close();
  ElectionContext ec = new OverseerElectionContext(zkClient, overseer, address.replaceAll("/", "_"));
  overseerElector.setup(ec);
  overseerElector.joinElection(ec, false);
  reader.close();
  return zkClient;
}
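A possible call site for the helper above is sketched below. The surrounding test code and the zkServer field are assumptions; only electNewOverseer itself comes from ChaosMonkeyShardSplitTest.

// Hypothetical call site: force a fresh overseer election against the test ZK server,
// then close the SolrZkClient the helper handed back once the scenario is done.
SolrZkClient electionClient = electNewOverseer(zkServer.getZkAddress());
try {
  // ... exercise the shard-split scenario while the newly elected overseer is active ...
} finally {
  electionClient.close();
}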