Use of com.codahale.metrics.jvm.GarbageCollectorMetricSet in project bookkeeper by apache.
The class CodahaleMetricsProvider, method initIfNecessary:
synchronized void initIfNecessary() {
    if (metrics == null) {
        metrics = new MetricRegistry();
        metrics.registerAll(new MemoryUsageGaugeSet());
        metrics.registerAll(new GarbageCollectorMetricSet());
    }
}
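For context, a minimal standalone sketch of what this registration yields: GarbageCollectorMetricSet contributes a <collector>.count and a <collector>.time gauge for each garbage collector the JVM reports, and the exact names depend on the GC configuration. The class name GcMetricsDump is invented for the example.

import java.util.Map;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jvm.GarbageCollectorMetricSet;

public class GcMetricsDump {
    public static void main(String[] args) {
        MetricRegistry metrics = new MetricRegistry();
        metrics.registerAll(new GarbageCollectorMetricSet());
        // Each collector contributes a <name>.count and a <name>.time gauge,
        // e.g. "G1-Young-Generation.count" on a G1 JVM.
        for (Map.Entry<String, Gauge> gauge : metrics.getGauges().entrySet()) {
            System.out.println(gauge.getKey() + " = " + gauge.getValue().getValue());
        }
    }
}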
Use of com.codahale.metrics.jvm.GarbageCollectorMetricSet in project nakadi by zalando.
The class MetricsConfig, method metricRegistry:
@Bean
public MetricRegistry metricRegistry() {
    final MetricRegistry metricRegistry = new MetricRegistry();
    metricRegistry.register("jvm.gc", new GarbageCollectorMetricSet());
    metricRegistry.register("jvm.buffers", new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer()));
    metricRegistry.register("jvm.memory", new MemoryUsageGaugeSet());
    metricRegistry.register("jvm.threads", new ThreadStatesGaugeSet());
    return metricRegistry;
}
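Registering the metric sets only collects the data; a reporter is needed to emit it. A minimal sketch of a companion bean, assuming a console reporter and a 60-second period; both choices and the MetricsReportingConfig class name are illustrative, not taken from nakadi.

import java.util.concurrent.TimeUnit;
import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricRegistry;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MetricsReportingConfig {
    @Bean(destroyMethod = "stop")
    public ConsoleReporter consoleReporter(final MetricRegistry metricRegistry) {
        final ConsoleReporter reporter = ConsoleReporter.forRegistry(metricRegistry)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS)
                .build();
        // Prints all registered metrics, including the jvm.* sets above, once a minute.
        reporter.start(60, TimeUnit.SECONDS);
        return reporter;
    }
}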
Use of com.codahale.metrics.jvm.GarbageCollectorMetricSet in project stdlib by petergeneric.
The class CoreMetricsModule, method buildRegistry:
public static MetricRegistry buildRegistry() {
    MetricRegistry registry = new MetricRegistry();
    registry.register(MetricRegistry.name("jvm", "gc"), new GarbageCollectorMetricSet());
    registry.register(MetricRegistry.name("jvm", "memory"), new MemoryUsageGaugeSet());
    registry.register(MetricRegistry.name("jvm", "thread-states"), new ThreadStatesGaugeSet());
    registry.register(MetricRegistry.name("jvm", "fd", "usage"), new FileDescriptorRatioGauge());
    return registry;
}
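MetricRegistry.name(...) joins its arguments with dots, so these registrations land under "jvm.gc", "jvm.memory", "jvm.thread-states" and "jvm.fd.usage", matching the literal-string style of the previous example. A hedged sketch of looking one gauge back up; RegistryDemo is an invented class name, and the file-descriptor gauge only yields a value on JVMs that expose descriptor counts.

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;

public class RegistryDemo {
    public static void main(String[] args) {
        MetricRegistry registry = CoreMetricsModule.buildRegistry();
        // MetricRegistry.name("jvm", "fd", "usage") resolves to "jvm.fd.usage".
        Gauge<?> fdUsage = registry.getGauges().get("jvm.fd.usage");
        // Ratio of open to maximum file descriptors; platform-dependent.
        System.out.println("fd usage: " + fdUsage.getValue());
    }
}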
Use of com.codahale.metrics.jvm.GarbageCollectorMetricSet in project cassandra by apache.
The class CassandraDaemon, method setup:
/**
* This is a hook for concrete daemons to initialize themselves suitably.
*
* Subclasses should override this to finish the job (listening on ports, etc.)
*/
protected void setup() {
    FileUtils.setFSErrorHandler(new DefaultFSErrorHandler());
    // Migrate system data to its new location if needed, before deleting snapshots and upgrading system tables.
    try {
        migrateSystemDataIfNeeded();
    } catch (IOException e) {
        exitOrFail(StartupException.ERR_WRONG_DISK_STATE, e.getMessage(), e);
    }
    maybeInitJmx();
    Mx4jTool.maybeLoad();
    ThreadAwareSecurityManager.install();
    logSystemInfo();
    NativeLibrary.tryMlockall();
    CommitLog.instance.start();
    runStartupChecks();
    try {
        SystemKeyspace.snapshotOnVersionChange();
    } catch (IOException e) {
        exitOrFail(StartupException.ERR_WRONG_DISK_STATE, e.getMessage(), e.getCause());
    }
    // We need to persist this as soon as possible after startup checks.
    // This should be the first write to SystemKeyspace (CASSANDRA-11742)
    SystemKeyspace.persistLocalMetadata();
    Thread.setDefaultUncaughtExceptionHandler(JVMStabilityInspector::uncaughtException);
    SystemKeyspaceMigrator40.migrate();
    // Populate token metadata before flushing, for token-aware sstable partitioning (#6696)
    StorageService.instance.populateTokenMetadata();
    try {
        // load schema from disk
        Schema.instance.loadFromDisk();
    } catch (Exception e) {
        logger.error("Error while loading schema: ", e);
        throw e;
    }
    setupVirtualKeyspaces();
    SSTableHeaderFix.fixNonFrozenUDTIfUpgradeFrom30();
    // clean up debris in the rest of the keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        // Skip system as we've already cleaned it
        if (keyspaceName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME))
            continue;
        for (TableMetadata cfm : Schema.instance.getTablesAndViews(keyspaceName)) {
            try {
                ColumnFamilyStore.scrubDataDirectories(cfm);
            } catch (StartupException e) {
                exitOrFail(e.returnCode, e.getMessage(), e.getCause());
            }
        }
    }
    Keyspace.setInitialized();
    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (logger.isDebugEnabled())
            logger.debug("opening keyspace {}", keyspaceName);
        // disable auto compaction until gossip settles since disk boundaries may be affected by ring layout
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }
    try {
        loadRowAndKeyCacheAsync().get();
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        logger.warn("Error loading key or row cache", t);
    }
    try {
        GCInspector.register();
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        logger.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }
    // Replay any CommitLogSegments found on disk
    try {
        CommitLog.instance.recoverSegmentsOnDisk();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    // Re-populate token metadata after commit log recovery (new peers might be loaded onto system keyspace #10293)
    StorageService.instance.populateTokenMetadata();
    SystemKeyspace.finishStartup();
    // Clean up system.size_estimates entries left lying around from missed keyspace drops (CASSANDRA-14905)
    StorageService.instance.cleanupSizeEstimates();
    // schedule periodic dumps of table size estimates into SystemKeyspace.SIZE_ESTIMATES_CF;
    // set cassandra.size_recorder_interval to 0 to disable
    int sizeRecorderInterval = Integer.getInteger("cassandra.size_recorder_interval", 5 * 60);
    if (sizeRecorderInterval > 0)
        ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SizeEstimatesRecorder.instance, 30, sizeRecorderInterval, TimeUnit.SECONDS);
    ActiveRepairService.instance.start();
    // Prepared statements
    QueryProcessor.instance.preloadPreparedStatements();
    // Metrics
    String metricsReporterConfigFile = System.getProperty("cassandra.metricsReporterConfigFile");
    if (metricsReporterConfigFile != null) {
        logger.info("Trying to load metrics-reporter-config from file: {}", metricsReporterConfigFile);
        try {
            // enable metrics provided by metrics-jvm.jar
            CassandraMetricsRegistry.Metrics.register("jvm.buffers", new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer()));
            CassandraMetricsRegistry.Metrics.register("jvm.gc", new GarbageCollectorMetricSet());
            CassandraMetricsRegistry.Metrics.register("jvm.memory", new MemoryUsageGaugeSet());
            CassandraMetricsRegistry.Metrics.register("jvm.fd.usage", new FileDescriptorRatioGauge());
            // initialize metrics-reporter-config from yaml file
            URL resource = CassandraDaemon.class.getClassLoader().getResource(metricsReporterConfigFile);
            if (resource == null) {
                logger.warn("Failed to load metrics-reporter-config, file does not exist: {}", metricsReporterConfigFile);
            } else {
                String reportFileLocation = resource.getFile();
                ReporterConfig.loadFromFile(reportFileLocation).enableAll(CassandraMetricsRegistry.Metrics);
            }
        } catch (Exception e) {
            logger.warn("Failed to load metrics-reporter-config, metric sinks will not be activated", e);
        }
    }
    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server. See log for stacktrace.");
        exitOrFail(1, "Fatal configuration error", e);
    }
    // Because we are writing to the system_distributed keyspace, this should happen after that keyspace
    // is created, which happens in StorageService.instance.initServer()
    Runnable viewRebuild = () -> {
        for (Keyspace keyspace : Keyspace.all()) {
            keyspace.viewManager.buildAllViews();
        }
        logger.debug("Completed submission of build tasks for any materialized views defined at startup");
    };
    ScheduledExecutors.optionalTasks.schedule(viewRebuild, StorageService.RING_DELAY, TimeUnit.MILLISECONDS);
    if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
        Gossiper.waitToSettle();
    StorageService.instance.doAuthSetup(false);
    // re-enable auto-compaction after gossip is settled, so correct disk boundaries are used
    for (Keyspace keyspace : Keyspace.all()) {
        for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
            for (final ColumnFamilyStore store : cfs.concatWithIndexes()) {
                // reload CFs in case there was a change of disk boundaries
                store.reload();
                if (store.getCompactionStrategyManager().shouldBeEnabled()) {
                    if (DatabaseDescriptor.getAutocompactionOnStartupEnabled()) {
                        store.enableAutoCompaction();
                    } else {
                        logger.info("Not enabling compaction for {}.{}; autocompaction_on_startup_enabled is set to false", store.keyspace.getName(), store.name);
                    }
                }
            }
        }
    }
    AuditLogManager.instance.initialize();
    // schedule periodic background compaction task submission. this is simply a backstop against compactions stalling
    // due to scheduling errors or race conditions
    ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(ColumnFamilyStore.getBackgroundCompactionTaskSubmitter(), 5, 1, TimeUnit.MINUTES);
    // schedule periodic recomputation of speculative retry thresholds
    ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SPECULATION_THRESHOLD_UPDATER, DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS), DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS), NANOSECONDS);
    initializeClientTransports();
    // init below this mark before completeSetup().
    if (DatabaseDescriptor.getAuthCacheWarmingEnabled())
        AuthCacheService.instance.warmCaches();
    else
        logger.info("Prewarming of auth caches is disabled");
    completeSetup();
}
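Isolating the metrics wiring from the long setup() above: a standalone sketch of the same pattern, registering the metrics-jvm sets behind a system-property switch. JvmMetricsWiring, the METRICS field, and the property name are illustrative stand-ins for CassandraMetricsRegistry.Metrics and cassandra.metricsReporterConfigFile.

import java.lang.management.ManagementFactory;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jvm.BufferPoolMetricSet;
import com.codahale.metrics.jvm.FileDescriptorRatioGauge;
import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
import com.codahale.metrics.jvm.MemoryUsageGaugeSet;

public class JvmMetricsWiring {
    static final MetricRegistry METRICS = new MetricRegistry(); // stand-in registry

    public static void main(String[] args) {
        // Mirrors the guard in CassandraDaemon.setup(): only wire JVM metrics
        // when a reporter config file has been supplied.
        String configFile = System.getProperty("metrics.reporterConfigFile"); // illustrative property
        if (configFile != null) {
            METRICS.register("jvm.buffers", new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer()));
            METRICS.register("jvm.gc", new GarbageCollectorMetricSet());
            METRICS.register("jvm.memory", new MemoryUsageGaugeSet());
            METRICS.register("jvm.fd.usage", new FileDescriptorRatioGauge());
        }
    }
}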
Use of com.codahale.metrics.jvm.GarbageCollectorMetricSet in project storm by apache.
The class SystemBolt, method prepare:
@SuppressWarnings({ "unchecked" })
@Override
public void prepare(final Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    if (prepareWasCalled && !"local".equals(topoConf.get(Config.STORM_CLUSTER_MODE))) {
        throw new RuntimeException("A single worker should have 1 SystemBolt instance.");
    }
    prepareWasCalled = true;
    context.registerMetricSet("GC", new GarbageCollectorMetricSet());
    context.registerMetricSet("threads", new ThreadStatesGaugeSet());
    context.registerMetricSet("memory", new MemoryUsageGaugeSet());
    final RuntimeMXBean jvmRt = ManagementFactory.getRuntimeMXBean();
    context.registerGauge("uptimeSecs", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return jvmRt.getUptime() / 1000L;
        }
    });
    context.registerGauge("startTimeSecs", new Gauge<Long>() {
        @Override
        public Long getValue() {
            return jvmRt.getStartTime() / 1000L;
        }
    });
    context.registerGauge("newWorkerEvent", new NewWorkerGauge());
    int bucketSize = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS));
    registerMetrics(context, (Map<String, String>) topoConf.get(Config.WORKER_METRICS), bucketSize, topoConf);
    registerMetrics(context, (Map<String, String>) topoConf.get(Config.TOPOLOGY_WORKER_METRICS), bucketSize, topoConf);
}
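Since com.codahale.metrics.Gauge declares a single getValue() method, the two anonymous classes above can be collapsed to lambdas on Java 8+. A hedged standalone sketch against a plain MetricRegistry rather than Storm's TopologyContext; WorkerMetricsSketch is an invented class name.

import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jvm.GarbageCollectorMetricSet;

public class WorkerMetricsSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        // The same metric set SystemBolt registers via registerMetricSet("GC", ...).
        registry.register("GC", new GarbageCollectorMetricSet());
        // Lambda form of the anonymous Gauge<Long> classes above.
        final RuntimeMXBean jvmRt = ManagementFactory.getRuntimeMXBean();
        registry.register("uptimeSecs", (Gauge<Long>) () -> jvmRt.getUptime() / 1000L);
        registry.register("startTimeSecs", (Gauge<Long>) () -> jvmRt.getStartTime() / 1000L);
    }
}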