Use of com.metamx.emitter.service.ServiceEmitter in project druid by druid-io.
The class DruidCoordinatorLogger, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  DruidCluster cluster = params.getDruidCluster();
  CoordinatorStats stats = params.getCoordinatorStats();
  ServiceEmitter emitter = params.getEmitter();
  Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
  if (assigned != null) {
    for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
      log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/assigned/count", assigned);
  Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
  if (dropped != null) {
    for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
      log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/dropped/count", dropped);
  emitTieredStats(emitter, "segment/cost/raw", stats.getPerTierStats().get("initialCost"));
  emitTieredStats(emitter, "segment/cost/normalization", stats.getPerTierStats().get("normalization"));
  emitTieredStats(emitter, "segment/moved/count", stats.getPerTierStats().get("movedCount"));
  emitTieredStats(emitter, "segment/deleted/count", stats.getPerTierStats().get("deletedCount"));
  Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
  if (normalized != null) {
    emitTieredStats(emitter, "segment/cost/normalized", Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {
      @Override
      public Number transformEntry(String key, AtomicLong value) {
        return value.doubleValue() / 1000d;
      }
    }));
  }
  Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
  if (unneeded != null) {
    for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
      log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
    }
  }
  emitTieredStats(emitter, "segment/unneeded/count", stats.getPerTierStats().get("unneededCount"));
  emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStats().get("overShadowedCount")));
  Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
  if (moved != null) {
    for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
      log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
    }
  }
  final Map<String, AtomicLong> unmoved = stats.getPerTierStats().get("unmovedCount");
  if (unmoved != null) {
    for (Map.Entry<String, AtomicLong> entry : unmoved.entrySet()) {
      log.info("[%s] : Let alone %,d segment(s)", entry.getKey(), entry.getValue().get());
    }
  }
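  // Log the per-server load and drop queue state for every tier.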
log.info("Load Queues:");
for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
for (ServerHolder serverHolder : serverHolders) {
ImmutableDruidServer server = serverHolder.getServer();
LoadQueuePeon queuePeon = serverHolder.getPeon();
log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
if (log.isDebugEnabled()) {
for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
log.debug("Segment to load[%s]", segment);
}
for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
log.debug("Segment to drop[%s]", segment);
}
}
}
}
// Emit coordinator metrics
final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
String serverName = entry.getKey();
LoadQueuePeon queuePeon = entry.getValue();
emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
}
for (Map.Entry<String, AtomicLong> entry : coordinator.getSegmentAvailability().entrySet()) {
String datasource = entry.getKey();
Long count = entry.getValue().get();
emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/unavailable/count", count));
}
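  // Emit per-tier, per-datasource under-replication counts reported by the coordinator.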
  for (Map.Entry<String, CountingMap<String>> entry : coordinator.getReplicationStatus().entrySet()) {
    String tier = entry.getKey();
    CountingMap<String> datasourceAvailabilities = entry.getValue();
    for (Map.Entry<String, AtomicLong> datasourceAvailability : datasourceAvailabilities.entrySet()) {
      String datasource = datasourceAvailability.getKey();
      Long count = datasourceAvailability.getValue().get();
      emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/underReplicated/count", count));
    }
  }
  // Emit segment metrics
  CountingMap<String> segmentSizes = new CountingMap<String>();
  CountingMap<String> segmentCounts = new CountingMap<String>();
  for (DruidDataSource dataSource : params.getDataSources()) {
    for (DataSegment segment : dataSource.getSegments()) {
      segmentSizes.add(dataSource.getName(), segment.getSize());
      segmentCounts.add(dataSource.getName(), 1L);
    }
  }
  for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long size = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", size));
  }
  for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
    String dataSource = entry.getKey();
    Long count = entry.getValue();
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", count));
  }
  return params;
}
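The method above leans on a private emitTieredStats helper that is not part of this excerpt. A minimal sketch of what such a helper could look like, inferred from the call sites: it must tolerate a null map (assigned and dropped are passed without null checks) and accept both AtomicLong and plain Number values (given the Maps.transformEntries call). This is an illustration, not the actual Druid implementation:

private void emitTieredStats(ServiceEmitter emitter, String metricName, Map<String, ? extends Number> statMap) {
  if (statMap != null) {
    for (Map.Entry<String, ? extends Number> entry : statMap.entrySet()) {
      // One event per tier, dimensioned so the values can be grouped downstream.
      emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, entry.getKey()).build(metricName, entry.getValue().doubleValue()));
    }
  }
}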
Use of com.metamx.emitter.service.ServiceEmitter in project druid by druid-io.
The class EmitterModule, method getServiceEmitter.
@Provides
@ManageLifecycle
public ServiceEmitter getServiceEmitter(@Self Supplier<DruidNode> configSupplier, Emitter emitter) {
  final DruidNode config = configSupplier.get();
  String version = getClass().getPackage().getImplementationVersion();
  // Version is null during `mvn test`.
  final ImmutableMap<String, String> otherServiceDimensions = ImmutableMap.of("version", Strings.nullToEmpty(version));
  final ServiceEmitter retVal = new ServiceEmitter(config.getServiceName(), config.getHostAndPort(), emitter, otherServiceDimensions);
  EmittingLogger.registerEmitter(retVal);
  return retVal;
}
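With the module installed, Guice can inject the managed ServiceEmitter anywhere in the process, and every emitted event automatically carries the service name and host dimensions configured above. A hypothetical consumer, for illustration only (the class name, metric name, and injection style are assumptions, not taken from Druid):

public class QueryTimeReporter {
  private final ServiceEmitter emitter;

  @Inject
  public QueryTimeReporter(ServiceEmitter emitter) {
    this.emitter = emitter;
  }

  public void reportQueryTime(String dataSource, long millis) {
    // Service and host dimensions come from the EmitterModule configuration.
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("query/time", millis));
  }
}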
Use of com.metamx.emitter.service.ServiceEmitter in project druid by druid-io.
The class MemcachedCache, method create.
public static MemcachedCache create(final MemcachedCacheConfig config) {
  final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
  final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
  final AbstractMonitor monitor = new AbstractMonitor() {
    final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(new HashMap<String, Long>());

    @Override
    public boolean doMonitor(ServiceEmitter emitter) {
      final Map<String, Long> priorValues = this.priorValues.get();
      final Map<String, Long> currentValues = getCurrentValues();
      final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
      for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
        emitter.emit(builder.setDimension("memcached metric", entry.getKey()).build("query/cache/memcached/total", entry.getValue()));
        final Long prior = priorValues.get(entry.getKey());
        if (prior != null) {
          emitter.emit(builder.setDimension("memcached metric", entry.getKey()).build("query/cache/memcached/delta", entry.getValue() - prior));
        }
      }
      if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
        log.error("Prior value changed while I was reporting! updating anyways");
        this.priorValues.set(currentValues);
      }
      return true;
    }

    private Map<String, Long> getCurrentValues() {
      final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
      for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      return builder.build();
    }
  };
  try {
    LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());
    // always use compression
    transcoder.setCompressionThreshold(0);
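    // Pick an operation queue: bounded by bytes when a limit is configured, otherwise an unbounded linked queue.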
    OperationQueueFactory opQueueFactory;
    long maxQueueBytes = config.getMaxOperationQueueSize();
    if (maxQueueBytes > 0) {
      opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
    } else {
      opQueueFactory = new LinkedOperationQueueFactory();
    }
    final Predicate<String> interesting = new Predicate<String>() {
      // See net.spy.memcached.MemcachedConnection.registerMetrics()
      private final Set<String> interestingMetrics = ImmutableSet.of(
          "[MEM] Reconnecting Nodes (ReconnectQueue)",
          //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
          "[MEM] Request Rate: All",
          "[MEM] Average Bytes written to OS per write",
          "[MEM] Average Bytes read from OS per read",
          "[MEM] Average Time on wire for operations (µs)",
          "[MEM] Response Rate: All (Failure + Success + Retry)",
          "[MEM] Response Rate: Retry",
          "[MEM] Response Rate: Failure",
          "[MEM] Response Rate: Success"
      );

      @Override
      public boolean apply(@Nullable String input) {
        return input != null && interestingMetrics.contains(input);
      }
    };
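    // Bridge spymemcached's metric callbacks into the counters/meters maps consumed by the monitor above.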
    final MetricCollector metricCollector = new MetricCollector() {
      @Override
      public void addCounter(String name) {
        if (!interesting.apply(name)) {
          return;
        }
        counters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Add Counter [%s]", name);
        }
      }

      @Override
      public void removeCounter(String name) {
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name) {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name, int amount) {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.addAndGet(amount);
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s] %d", name, amount);
        }
      }

      @Override
      public void decrementCounter(String name) {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.decrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s]", name);
        }
      }

      @Override
      public void decrementCounter(String name, int amount) {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0L));
          counter = counters.get(name);
        }
        counter.addAndGet(-amount);
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s] %d", name, amount);
        }
      }

      @Override
      public void addMeter(String name) {
        if (!interesting.apply(name)) {
          return;
        }
        meters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Adding meter [%s]", name);
        }
      }

      @Override
      public void removeMeter(String name) {
        if (!interesting.apply(name)) {
          return;
        }
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove meter [%s]", name);
        }
      }

      @Override
      public void markMeter(String name) {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong meter = meters.get(name);
        if (meter == null) {
          meters.putIfAbsent(name, new AtomicLong(0L));
          meter = meters.get(name);
        }
        meter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment counter [%s]", name);
        }
      }

      @Override
      public void addHistogram(String name) {
        log.debug("Ignoring add histogram [%s]", name);
      }

      @Override
      public void removeHistogram(String name) {
        log.debug("Ignoring remove histogram [%s]", name);
      }

      @Override
      public void updateHistogram(String name, int amount) {
        log.debug("Ignoring update histogram [%s]: %d", name, amount);
      }
    };
    final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
        .setKetamaNodeRepetitions(1000)
        .setHashAlg(MURMUR3_128)
        .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
        .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT)
        .setDaemon(true)
        .setFailureMode(FailureMode.Cancel)
        .setTranscoder(transcoder)
        .setShouldOptimize(true)
        .setOpQueueMaxBlockTime(config.getTimeout())
        .setOpTimeout(config.getTimeout())
        .setReadBufferSize(config.getReadBufferSize())
        .setOpQueueFactory(opQueueFactory)
        .setMetricCollector(metricCollector)
        .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
        .build();
    final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());
    final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;
    if (config.getNumConnections() > 1) {
      clientSupplier = new MemcacheClientPool(config.getNumConnections(), new Supplier<MemcachedClientIF>() {
        @Override
        public MemcachedClientIF get() {
          try {
            return new MemcachedClient(connectionFactory, hosts);
          } catch (IOException e) {
            log.error(e, "Unable to create memcached client");
            throw Throwables.propagate(e);
          }
        }
      });
    } else {
      clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(StupidResourceHolder.<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
    }
    return new MemcachedCache(clientSupplier, config, monitor);
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }
}
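Every counter and meter update above uses the pre-Java-8 putIfAbsent-then-get idiom to create map entries race-free. On Java 8 and later, ConcurrentMap.computeIfAbsent expresses the same thing more compactly; an equivalent sketch of incrementCounter under that assumption (not the code this Druid version ships):

@Override
public void incrementCounter(String name) {
  if (!interesting.apply(name)) {
    return;
  }
  // Atomically creates the counter on first use, then increments it.
  counters.computeIfAbsent(name, k -> new AtomicLong(0L)).incrementAndGet();
  if (log.isDebugEnabled()) {
    log.debug("Increment [%s]", name);
  }
}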