Use of org.apache.druid.collections.ResourceHolder in project druid by druid-io.
The class MemcachedCache, method create.
public static MemcachedCache create(final MemcachedCacheConfig config)
{
  final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
  final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
  final AbstractMonitor monitor = new AbstractMonitor()
  {
    final AtomicReference<Map<String, Long>> priorValues =
        new AtomicReference<Map<String, Long>>(new HashMap<String, Long>());

    @Override
    public boolean doMonitor(ServiceEmitter emitter)
    {
      final Map<String, Long> priorValues = this.priorValues.get();
      final Map<String, Long> currentValues = getCurrentValues();
      final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
      for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
        emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/total", entry.getValue()));
        final Long prior = priorValues.get(entry.getKey());
        if (prior != null) {
          emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                              .build("query/cache/memcached/delta", entry.getValue() - prior));
        }
      }
      if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
        log.error("Prior value changed while I was reporting! updating anyways");
        this.priorValues.set(currentValues);
      }
      return true;
    }

    private Map<String, Long> getCurrentValues()
    {
      final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
      for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
        builder.put(entry.getKey(), entry.getValue().get());
      }
      return builder.build();
    }
  };
  try {
    LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());
    // always use compression
    transcoder.setCompressionThreshold(0);
    OperationQueueFactory opQueueFactory;
    long maxQueueBytes = config.getMaxOperationQueueSize();
    if (maxQueueBytes > 0) {
      opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
    } else {
      opQueueFactory = new LinkedOperationQueueFactory();
    }
    final Predicate<String> interesting = new Predicate<String>()
    {
      // See net.spy.memcached.MemcachedConnection.registerMetrics()
      private final Set<String> interestingMetrics = ImmutableSet.of(
          "[MEM] Reconnecting Nodes (ReconnectQueue)",
          // "[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
          "[MEM] Request Rate: All",
          "[MEM] Average Bytes written to OS per write",
          "[MEM] Average Bytes read from OS per read",
          "[MEM] Average Time on wire for operations (µs)",
          "[MEM] Response Rate: All (Failure + Success + Retry)",
          "[MEM] Response Rate: Retry",
          "[MEM] Response Rate: Failure",
          "[MEM] Response Rate: Success"
      );

      @Override
      public boolean apply(@Nullable String input)
      {
        return input != null && interestingMetrics.contains(input);
      }
    };
    final MetricCollector metricCollector = new MetricCollector()
    {
      @Override
      public void addCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        counters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Add Counter [%s]", name);
        }
      }

      @Override
      public void removeCounter(String name)
      {
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s]", name);
        }
      }

      @Override
      public void incrementCounter(String name, int amount)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.addAndGet(amount);
        if (log.isDebugEnabled()) {
          log.debug("Increment [%s] %d", name, amount);
        }
      }

      @Override
      public void decrementCounter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0));
          counter = counters.get(name);
        }
        counter.decrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s]", name);
        }
      }

      @Override
      public void decrementCounter(String name, int amount)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong counter = counters.get(name);
        if (counter == null) {
          counters.putIfAbsent(name, new AtomicLong(0L));
          counter = counters.get(name);
        }
        counter.addAndGet(-amount);
        if (log.isDebugEnabled()) {
          log.debug("Decrement [%s] %d", name, amount);
        }
      }

      @Override
      public void addMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        meters.putIfAbsent(name, new AtomicLong(0L));
        if (log.isDebugEnabled()) {
          log.debug("Adding meter [%s]", name);
        }
      }

      @Override
      public void removeMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        if (log.isDebugEnabled()) {
          log.debug("Ignoring request to remove meter [%s]", name);
        }
      }

      @Override
      public void markMeter(String name)
      {
        if (!interesting.apply(name)) {
          return;
        }
        AtomicLong meter = meters.get(name);
        if (meter == null) {
          meters.putIfAbsent(name, new AtomicLong(0L));
          meter = meters.get(name);
        }
        meter.incrementAndGet();
        if (log.isDebugEnabled()) {
          log.debug("Increment counter [%s]", name);
        }
      }

      @Override
      public void addHistogram(String name)
      {
        log.debug("Ignoring add histogram [%s]", name);
      }

      @Override
      public void removeHistogram(String name)
      {
        log.debug("Ignoring remove histogram [%s]", name);
      }

      @Override
      public void updateHistogram(String name, int amount)
      {
        log.debug("Ignoring update histogram [%s]: %d", name, amount);
      }
    };
    final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
        .setKetamaNodeRepetitions(1000)
        .setHashAlg(MURMUR3_128)
        .setProtocol(ConnectionFactoryBuilder.Protocol.valueOf(StringUtils.toUpperCase(config.getProtocol())))
        .setLocatorType(ConnectionFactoryBuilder.Locator.valueOf(StringUtils.toUpperCase(config.getLocator())))
        .setDaemon(true)
        .setFailureMode(FailureMode.Cancel)
        .setTranscoder(transcoder)
        .setShouldOptimize(true)
        .setOpQueueMaxBlockTime(config.getTimeout())
        .setOpTimeout(config.getTimeout())
        .setReadBufferSize(config.getReadBufferSize())
        .setOpQueueFactory(opQueueFactory)
        .setMetricCollector(metricCollector)
        .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
        .build();
    final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());
    final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;
    if (config.getNumConnections() > 1) {
      clientSupplier = new MemcacheClientPool(
          config.getNumConnections(),
          new Supplier<MemcachedClientIF>()
          {
            @Override
            public MemcachedClientIF get()
            {
              try {
                return new MemcachedClient(connectionFactory, hosts);
              } catch (IOException e) {
                log.error(e, "Unable to create memcached client");
                throw new RuntimeException(e);
              }
            }
          }
      );
    } else {
      clientSupplier = Suppliers.ofInstance(StupidResourceHolder.create(new MemcachedClient(connectionFactory, hosts)));
    }
    return new MemcachedCache(clientSupplier, config, monitor);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
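The ResourceHolder angle in this example is the clientSupplier: when connection pooling is enabled, each call hands out a MemcachedClientIF wrapped in a holder whose close() returns the client to the pool instead of shutting it down (the single-connection StupidResourceHolder makes close() a no-op). A minimal calling sketch, assuming a clientSupplier built as in create() above; the method name and cache key are illustrative, not from the Druid source:

void lookupExample(Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier) throws IOException
{
  // ResourceHolder extends Closeable, so try-with-resources releases the
  // client back to the pool when the block exits.
  try (ResourceHolder<MemcachedClientIF> holder = clientSupplier.get()) {
    MemcachedClientIF client = holder.get();
    Object raw = client.get("example-key"); // spymemcached synchronous get; key is hypothetical
    // ... decode raw into a cached value ...
  }
}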
Use of org.apache.druid.collections.ResourceHolder in project druid by druid-io.
The class GroupByRowProcessor, method process.
/**
 * Process the input sequence "rows" (the output of "subquery") based on "query" and return a {@link ResultSupplier}.
 *
 * In addition to grouping using dimensions and metrics, this also applies filters (both DimFilter and interval
 * filters).
 *
 * The input sequence is processed synchronously within the call to this method, while result iteration happens
 * lazily upon calls to the {@link ResultSupplier}. Make sure to close it when you're done.
 */
public static ResultSupplier process(
    final GroupByQuery query,
    final GroupByQuery subquery,
    final Sequence<ResultRow> rows,
    final GroupByQueryConfig config,
    final GroupByQueryResource resource,
    final ObjectMapper spillMapper,
    final String processingTmpDir,
    final int mergeBufferSize
)
{
  final Closer closeOnExit = Closer.create();
  final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
  final File temporaryStorageDirectory = new File(
      processingTmpDir,
      StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId())
  );
  final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(
      temporaryStorageDirectory,
      querySpecificConfig.getMaxOnDiskStorage()
  );
  closeOnExit.register(temporaryStorage);
  Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, ResultRow>> pair =
      RowBasedGrouperHelper.createGrouperAccumulatorPair(
          query,
          subquery,
          querySpecificConfig,
          new Supplier<ByteBuffer>()
          {
            @Override
            public ByteBuffer get()
            {
              final ResourceHolder<ByteBuffer> mergeBufferHolder = resource.getMergeBuffer();
              closeOnExit.register(mergeBufferHolder);
              return mergeBufferHolder.get();
            }
          },
          temporaryStorage,
          spillMapper,
          mergeBufferSize
      );
  final Grouper<RowBasedKey> grouper = pair.lhs;
  final Accumulator<AggregateResult, ResultRow> accumulator = pair.rhs;
  closeOnExit.register(grouper);
  final AggregateResult retVal = rows.accumulate(AggregateResult.ok(), accumulator);
  if (!retVal.isOk()) {
    throw new ResourceLimitExceededException(retVal.getReason());
  }
  return new ResultSupplier()
  {
    @Override
    public Sequence<ResultRow> results(@Nullable List<DimensionSpec> dimensionsToInclude)
    {
      return getRowsFromGrouper(query, grouper, dimensionsToInclude);
    }

    @Override
    public void close() throws IOException
    {
      closeOnExit.close();
    }
  };
}
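A usage sketch of the contract described in the javadoc: process() consumes the subquery rows eagerly, while results() is lazy, so the supplier must stay open until the results have been consumed. All arguments below are stand-ins for values a group-by query runner would already have, and the enclosing method is assumed to handle the IOException that close() may throw:

try (ResultSupplier supplier = GroupByRowProcessor.process(
    query, subquery, rows, config, resource, spillMapper, processingTmpDir, mergeBufferSize
)) {
  // null dimensionsToInclude appears to mean "no dimension trimming" (note the @Nullable parameter)
  List<ResultRow> materialized = supplier.results(null).toList();
  // ... use the materialized rows before the supplier (and its merge buffers) are closed ...
}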
Use of org.apache.druid.collections.ResourceHolder in project druid by druid-io.
The class DecompressingByteBufferObjectStrategy, method fromByteBuffer.
@Override
public ResourceHolder<ByteBuffer> fromByteBuffer(ByteBuffer buffer, int numBytes)
{
  final ResourceHolder<ByteBuffer> bufHolder = CompressedPools.getByteBuf(order);
  final ByteBuffer buf = bufHolder.get();
  buf.clear();
  decompressor.decompress(buffer, numBytes, buf);
  // Needed because, e.g., if this compressed buffer contains 3-byte integers, it should be possible to getInt()
  // from the buffer, including padding. See CompressedVSizeColumnarIntsSupplier.bufferPadding().
  buf.limit(buf.capacity());
  return new ResourceHolder<ByteBuffer>()
  {
    @Override
    public ByteBuffer get()
    {
      return buf;
    }

    @Override
    public void close()
    {
      bufHolder.close();
    }
  };
}
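Each call borrows a direct buffer from CompressedPools, and the returned holder wraps the pooled one, so closing the outer holder is what puts the buffer back. An illustrative sketch of the calling pattern; "strategy", "compressed", "numBytes", and consume() are hypothetical names, not Druid API:

try (ResourceHolder<ByteBuffer> holder = strategy.fromByteBuffer(compressed, numBytes)) {
  ByteBuffer decompressed = holder.get();
  // limit == capacity, so reads that run into the padding (e.g. getInt() over
  // 3-byte values) stay legal, per the comment in fromByteBuffer above.
  consume(decompressed);
} // close() here delegates to bufHolder.close(), returning the buffer to the pool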