Use of org.apache.solr.metrics.SolrMetricManager in project lucene-solr by apache.
The class TestCoreAdmin, method testCoreSwap.
@Test
public void testCoreSwap() throws Exception {
  // index marker docs to core0
  SolrClient cli0 = getSolrCore0();
  SolrInputDocument d = new SolrInputDocument("id", "core0-0");
  cli0.add(d);
  d = new SolrInputDocument("id", "core0-1");
  cli0.add(d);
  cli0.commit();
  // index a marker doc to core1
  SolrClient cli1 = getSolrCore1();
  d = new SolrInputDocument("id", "core1-0");
  cli1.add(d);
  cli1.commit();
  // initial state assertions
  SolrQuery q = new SolrQuery("*:*");
  QueryResponse rsp = cli0.query(q);
  SolrDocumentList docs = rsp.getResults();
  assertEquals(2, docs.size());
  docs.forEach(doc -> {
    assertTrue(doc.toString(), doc.getFieldValue("id").toString().startsWith("core0-"));
  });
  rsp = cli1.query(q);
  docs = rsp.getResults();
  assertEquals(1, docs.size());
  docs.forEach(doc -> {
    assertTrue(doc.toString(), doc.getFieldValue("id").toString().startsWith("core1-"));
  });
  // assert initial metrics
  SolrMetricManager metricManager = cores.getMetricManager();
  String core0RegistryName = SolrCoreMetricManager.createRegistryName(false, null, null, null, "core0");
  String core1RegistryName = SolrCoreMetricManager.createRegistryName(false, null, null, null, "core1");
  MetricRegistry core0Registry = metricManager.registry(core0RegistryName);
  MetricRegistry core1Registry = metricManager.registry(core1RegistryName);
  // 2 docs + 1 commit
  assertEquals(3, core0Registry.counter("UPDATE./update.requests").getCount());
  // 1 doc + 1 commit
  assertEquals(2, core1Registry.counter("UPDATE./update.requests").getCount());
  // swap
  CoreAdminRequest.swapCore("core0", "core1", getSolrAdmin());
  // assert state after swap
  cli0 = getSolrCore0();
  cli1 = getSolrCore1();
  rsp = cli0.query(q);
  docs = rsp.getResults();
  assertEquals(1, docs.size());
  docs.forEach(doc -> {
    assertTrue(doc.toString(), doc.getFieldValue("id").toString().startsWith("core1-"));
  });
  rsp = cli1.query(q);
  docs = rsp.getResults();
  assertEquals(2, docs.size());
  docs.forEach(doc -> {
    assertTrue(doc.toString(), doc.getFieldValue("id").toString().startsWith("core0-"));
  });
  core0Registry = metricManager.registry(core0RegistryName);
  core1Registry = metricManager.registry(core1RegistryName);
  assertEquals(2, core0Registry.counter("UPDATE./update.requests").getCount());
  assertEquals(3, core1Registry.counter("UPDATE./update.requests").getCount());
}
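
The final assertions pass because the metric registry contents effectively trade places along with the cores: the registry name solr.core.core0 stays fixed, but after the swap it reports the counters accumulated against the other index. A minimal sketch of the standalone name-to-counter lookup, assuming the same test fixtures as above:

// Sketch (assumes the fixtures above): in standalone mode the registry name is
// derived purely from the core name, so it remains "solr.core.core0" across the
// swap while the counters it holds now belong to the former core1.
String name = SolrCoreMetricManager.createRegistryName(false, null, null, null, "core0");
Counter updates = metricManager.registry(name).counter("UPDATE./update.requests");
assertEquals(2, updates.getCount()); // 1 doc + 1 commit, inherited from the former core1
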
Use of org.apache.solr.metrics.SolrMetricManager in project lucene-solr by apache.
The class HdfsCollectionsAPIDistributedZkTest, method moveReplicaTest.
@Test
public void moveReplicaTest() throws Exception {
  cluster.waitForAllNodes(5000);
  String coll = "movereplicatest_coll";
  CloudSolrClient cloudClient = cluster.getSolrClient();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
  create.setMaxShardsPerNode(2);
  cloudClient.request(create);
  for (int i = 0; i < 10; i++) {
    cloudClient.add(coll, sdoc("id", String.valueOf(i)));
    cloudClient.commit(coll);
  }
  List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
  Collections.shuffle(slices, random());
  Slice slice = null;
  Replica replica = null;
  for (Slice s : slices) {
    slice = s;
    for (Replica r : s.getReplicas()) {
      if (s.getLeader() != r) {
        replica = r;
      }
    }
  }
  String dataDir = getDataDir(replica);
  Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
  ArrayList<String> l = new ArrayList<>(liveNodes);
  Collections.shuffle(l, random());
  String targetNode = null;
  // iterate the shuffled copy (not the original set) so the target node is picked at random
  for (String node : l) {
    if (!replica.getNodeName().equals(node)) {
      targetNode = node;
      break;
    }
  }
  assertNotNull(targetNode);
  CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
  moveReplica.process(cloudClient);
  checkNumOfCores(cloudClient, replica.getNodeName(), 0);
  checkNumOfCores(cloudClient, targetNode, 2);
  waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
  slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
  boolean found = false;
  for (Replica newReplica : slice.getReplicas()) {
    if (getDataDir(newReplica).equals(dataDir)) {
      found = true;
    }
  }
  assertTrue(found);
  // data dir is reused so replication will be skipped
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream()
        .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
      if (counter != null) {
        assertEquals(0, counter.getCount());
      }
    }
  }
}
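
checkNumOfCores is a private helper of the test class that is not shown in this excerpt. A plausible sketch, assuming SolrJ's HttpSolrClient and the CoreAdmin STATUS API (this implementation is illustrative, not the project's exact code):

// Hypothetical helper: count the cores a node hosts via its CoreAdmin STATUS endpoint.
private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores)
    throws IOException, SolrServerException {
  String baseUrl = cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName);
  try (HttpSolrClient client = new HttpSolrClient.Builder(baseUrl).build()) {
    CoreAdminResponse status = CoreAdminRequest.getStatus(null, client); // null = status of all cores
    assertEquals(nodeName + " does not host the expected number of cores",
        expectedCores, status.getCoreStatus().size());
  }
}
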
Use of org.apache.solr.metrics.SolrMetricManager in project lucene-solr by apache.
The class CoreContainer, method load.
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
 * Load the cores defined for this CoreContainer
 */
public void load() {
  log.debug("Loading cores into CoreContainer [instanceDir={}]", loader.getInstancePath());
  // add the sharedLib to the shared resource loader before initializing cfg based plugins
  String libDir = cfg.getSharedLibDirectory();
  if (libDir != null) {
    Path libPath = loader.getInstancePath().resolve(libDir);
    try {
      loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
      loader.reloadLuceneSPI();
    } catch (IOException e) {
      if (!libDir.equals("lib")) {
        // don't complain if the default "lib" dir does not exist
        log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
      }
    }
  }
  metricManager = new SolrMetricManager(loader, cfg.getMetricsConfig());
  coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(
      coreContainerWorkExecutor, null,
      metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
      SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
  shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
  if (shardHandlerFactory instanceof SolrMetricProducer) {
    SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory;
    metricProducer.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "httpShardHandler");
  }
  updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());
  updateShardHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), "updateShardHandler");
  transientCoreCache = TransientSolrCoreCacheFactory.newInstance(loader, this);
  logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
  hostName = cfg.getNodeName();
  zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
  if (isZooKeeperAware()) {
    pkiAuthenticationPlugin = new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName());
  }
  MDCLoggingContext.setNode(this);
  securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
  reloadSecurityProperties();
  this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());
  createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
  collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
  infoHandler = createHandler(INFO_HANDLER_PATH, cfg.getInfoHandlerClass(), InfoHandler.class);
  coreAdminHandler = createHandler(CORES_HANDLER_PATH, cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
  configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
  metricsHandler = createHandler(METRICS_PATH, MetricsHandler.class.getName(), MetricsHandler.class);
  metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
  // may want to add some configuration here in the future
  metricsCollectorHandler.init(null);
  containerHandlers.put(AUTHZ_PATH, securityConfHandler);
  securityConfHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), AUTHZ_PATH);
  containerHandlers.put(AUTHC_PATH, securityConfHandler);
  if (pkiAuthenticationPlugin != null) {
    containerHandlers.put(PKIAuthenticationPlugin.PATH, pkiAuthenticationPlugin.getRequestHandler());
  }
  PluginInfo[] metricReporters = cfg.getMetricsConfig().getMetricReporters();
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.node);
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.jvm);
  metricManager.loadReporters(metricReporters, loader, null, SolrInfoBean.Group.jetty);
  coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);
  containerProperties.putAll(cfg.getSolrProperties());
  // initialize gauges for reporting the number of cores and disk total/free
  String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
  metricManager.registerGauge(null, registryName, () -> solrCores.getCores().size(),
      true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(),
      true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(),
      true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
  metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getTotalSpace(),
      true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
  metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getUsableSpace(),
      true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
  // add version information
  metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getSpecificationVersion(),
      true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version");
  metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getImplementationVersion(),
      true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version");
  SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean();
  fieldCacheBean.initializeMetrics(metricManager, registryName, null);
  if (isZooKeeperAware()) {
    metricManager.loadClusterReporters(metricReporters, this);
  }
  // set up an executor to load cores in parallel
  ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
      ExecutorUtil.newMDCAwareFixedThreadPool(cfg.getCoreLoadThreadCount(isZooKeeperAware()),
          new DefaultSolrThreadFactory("coreLoadExecutor")),
      null,
      metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
      SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
  final List<Future<SolrCore>> futures = new ArrayList<>();
  try {
    List<CoreDescriptor> cds = coresLocator.discover(this);
    if (isZooKeeperAware()) {
      // sort the cores when running in SolrCloud; in standalone mode the order does not matter
      CoreSorter coreComparator = new CoreSorter().init(this);
      // make a copy
      cds = new ArrayList<>(cds);
      Collections.sort(cds, coreComparator::compare);
    }
    checkForDuplicateCoreNames(cds);
    status |= CORE_DISCOVERY_COMPLETE;
    for (final CoreDescriptor cd : cds) {
      if (cd.isTransient() || !cd.isLoadOnStartup()) {
        getTransientCacheHandler().addTransientDescriptor(cd.getName(), cd);
      } else if (asyncSolrCoreLoad) {
        solrCores.markCoreAsLoading(cd);
      }
      if (cd.isLoadOnStartup()) {
        futures.add(coreLoadExecutor.submit(() -> {
          SolrCore core;
          try {
            if (zkSys.getZkController() != null) {
              zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
            }
            core = create(cd, false, false);
          } finally {
            if (asyncSolrCoreLoad) {
              solrCores.markCoreAsNotLoading(cd);
            }
          }
          try {
            zkSys.registerInZk(core, true, false);
          } catch (RuntimeException e) {
            SolrException.log(log, "Error registering SolrCore", e);
          }
          return core;
        }));
      }
    }
    // start the background closer thread
    backgroundCloser = new CloserThread(this, solrCores, cfg);
    backgroundCloser.start();
  } finally {
    if (asyncSolrCoreLoad && futures != null) {
      coreContainerWorkExecutor.submit((Runnable) () -> {
        try {
          for (Future<SolrCore> future : futures) {
            try {
              future.get();
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
              log.error("Error waiting for SolrCore to be created", e);
            }
          }
        } finally {
          ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
        }
      });
    } else {
      ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
    }
  }
  if (isZooKeeperAware()) {
    zkSys.getZkController().checkOverseerDesignate();
  }
  // A bit redundant, but LOAD_COMPLETE and INITIAL_CORE_LOAD_COMPLETE are two distinct
  // concepts even though they happen to be reached at the same time.
  status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
}
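
Every registerGauge call above shares one shape: a Gauge supplier, a force flag, a metric name, and trailing path segments that form the dotted prefix. A hedged sketch of the same pattern for a hypothetical extra node-level metric (the freeHeap name and memory path segment are illustrative, not part of CoreContainer):

// Hypothetical sketch, reusing the node registry that load() populates:
String nodeRegistry = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
metricManager.registerGauge(null, nodeRegistry,
    () -> Runtime.getRuntime().freeMemory(), // the Gauge<Long>, supplied as a lambda
    true,                                    // force: replace any existing gauge of this name
    "freeHeap",                              // metric name (illustrative)
    SolrInfoBean.Category.CONTAINER.toString(), "memory"); // full name: CONTAINER.memory.freeHeap
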
Use of org.apache.solr.metrics.SolrMetricManager in project lucene-solr by apache.
The class DataImportHandler, method initializeMetrics.
@Override
public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) {
  super.initializeMetrics(manager, registryName, scope);
  metrics = new MetricsMap((detailed, map) -> {
    if (importer != null) {
      DocBuilder.Statistics cumulative = importer.cumulativeStatistics;
      map.put("Status", importer.getStatus().toString());
      if (importer.docBuilder != null) {
        DocBuilder.Statistics running = importer.docBuilder.importStatistics;
        map.put("Documents Processed", running.docCount);
        map.put("Requests made to DataSource", running.queryCount);
        map.put("Rows Fetched", running.rowsCount);
        map.put("Documents Deleted", running.deletedDocCount);
        map.put("Documents Skipped", running.skipDocCount);
      }
      map.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, cumulative.docCount);
      map.put(DataImporter.MSG.TOTAL_QUERIES_EXECUTED, cumulative.queryCount);
      map.put(DataImporter.MSG.TOTAL_ROWS_EXECUTED, cumulative.rowsCount);
      map.put(DataImporter.MSG.TOTAL_DOCS_DELETED, cumulative.deletedDocCount);
      map.put(DataImporter.MSG.TOTAL_DOCS_SKIPPED, cumulative.skipDocCount);
    }
  });
  manager.registerGauge(this, registryName, metrics, true, "importer", getCategory().toString(), scope);
}
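
MetricsMap wraps a callback that fills a map on demand and exposes it as a single Codahale Gauge; the detailed flag lets callers request an expanded view. A minimal hedged sketch of the same pattern outside DataImportHandler (requestCount and lastRequestMs are assumed AtomicLong fields, not the handler's):

// Hypothetical sketch: expose two values through one gauge.
MetricsMap sketch = new MetricsMap((detailed, map) -> {
  map.put("requests", requestCount.get());
  if (detailed) {
    map.put("lastRequestTimeMs", lastRequestMs.get()); // extra detail only when asked for
  }
});
manager.registerGauge(this, registryName, sketch, true, "stats", getCategory().toString(), scope);
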
Use of org.apache.solr.metrics.SolrMetricManager in project lucene-solr by apache.
The class LRUCache, method initializeMetrics.
@Override
public void initializeMetrics(SolrMetricManager manager, String registryName, String scope) {
  registry = manager.registry(registryName);
  cacheMap = new MetricsMap((detailed, res) -> {
    synchronized (map) {
      res.put("lookups", lookups);
      res.put("hits", hits);
      res.put("hitratio", calcHitRatio(lookups, hits));
      res.put("inserts", inserts);
      res.put("evictions", evictions);
      res.put("size", map.size());
      if (maxRamBytes != Long.MAX_VALUE) {
        res.put("maxRamMB", maxRamBytes / 1024L / 1024L);
        res.put("ramBytesUsed", ramBytesUsed());
        res.put("evictionsRamUsage", evictionsRamUsage);
      }
    }
    res.put("warmupTime", warmupTime);
    long clookups = stats.lookups.longValue();
    long chits = stats.hits.longValue();
    res.put("cumulative_lookups", clookups);
    res.put("cumulative_hits", chits);
    res.put("cumulative_hitratio", calcHitRatio(clookups, chits));
    res.put("cumulative_inserts", stats.inserts.longValue());
    res.put("cumulative_evictions", stats.evictions.longValue());
    if (maxRamBytes != Long.MAX_VALUE) {
      res.put("cumulative_evictionsRamUsage", stats.evictionsRamUsage.longValue());
    }
  });
  manager.registerGauge(this, registryName, cacheMap, true, scope, getCategory().toString());
}
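
Once registered, the whole map is readable back from the registry as one gauge whose value is the map itself. A sketch of the reverse lookup, assuming the same registryName, scope, and CACHE category used above:

// Sketch: the registered metric name is mkName(scope, category), i.e. "CACHE." + scope.
String fullName = SolrMetricManager.mkName(scope, getCategory().toString());
Gauge<?> gauge = manager.registry(registryName).getGauges().get(fullName);
@SuppressWarnings("unchecked")
Map<String, Object> values = (Map<String, Object>) gauge.getValue(); // lookups, hits, hitratio, ...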