Use of com.google.common.base.Function in project druid by druid-io.
From the class ChainedExecutionQueryRunner, the run method:
@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext)
{
  final int priority = BaseQuery.getContextPriority(query, 0);
  final Ordering ordering = query.getResultOrdering();

  return new BaseSequence<T, Iterator<T>>(
      new BaseSequence.IteratorMaker<T, Iterator<T>>()
      {
        @Override
        public Iterator<T> make()
        {
          // Make it a List<> to materialize all of the values (so that it will submit everything to the executor)
          ListenableFuture<List<Iterable<T>>> futures = Futures.allAsList(
              Lists.newArrayList(
                  Iterables.transform(
                      queryables,
                      new Function<QueryRunner<T>, ListenableFuture<Iterable<T>>>()
                      {
                        @Override
                        public ListenableFuture<Iterable<T>> apply(final QueryRunner<T> input)
                        {
                          if (input == null) {
                            throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                          }
                          return exec.submit(
                              new AbstractPrioritizedCallable<Iterable<T>>(priority)
                              {
                                @Override
                                public Iterable<T> call() throws Exception
                                {
                                  try {
                                    Sequence<T> result = input.run(query, responseContext);
                                    if (result == null) {
                                      throw new ISE("Got a null result! Segments are missing!");
                                    }
                                    List<T> retVal = Sequences.toList(result, Lists.<T>newArrayList());
                                    if (retVal == null) {
                                      throw new ISE("Got a null list of results! WTF?!");
                                    }
                                    return retVal;
                                  }
                                  catch (QueryInterruptedException e) {
                                    throw Throwables.propagate(e);
                                  }
                                  catch (Exception e) {
                                    log.error(e, "Exception with one of the sequences!");
                                    throw Throwables.propagate(e);
                                  }
                                }
                              }
                          );
                        }
                      }
                  )
              )
          );
          queryWatcher.registerQuery(query, futures);
          try {
            final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT, (Number) null);
            return new MergeIterable<>(
                ordering.nullsFirst(),
                timeout == null ? futures.get() : futures.get(timeout.longValue(), TimeUnit.MILLISECONDS)
            ).iterator();
          }
          catch (InterruptedException e) {
            log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
            futures.cancel(true);
            throw new QueryInterruptedException(e);
          }
          catch (CancellationException e) {
            throw new QueryInterruptedException(e);
          }
          catch (TimeoutException e) {
            log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
            futures.cancel(true);
            throw new QueryInterruptedException(e);
          }
          catch (ExecutionException e) {
            throw Throwables.propagate(e.getCause());
          }
        }

        @Override
        public void cleanup(Iterator<T> tIterator)
        {
        }
      }
  );
}
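The pattern worth noting here is the Guava Function that maps each QueryRunner to a ListenableFuture, combined with Futures.allAsList, which collapses a collection of futures into a single future that completes once every task has. Below is a minimal, self-contained sketch of that pattern using plain Guava and no Druid types; the class name AllAsListExample and the toy integer partitions are invented for illustration:

import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class AllAsListExample
{
  public static void main(String[] args) throws Exception
  {
    final ListeningExecutorService exec =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

    final List<List<Integer>> partitions =
        Lists.newArrayList(Lists.newArrayList(1, 3), Lists.newArrayList(2, 4));

    // Materialize the transform into a List so every task is submitted eagerly,
    // mirroring the Lists.newArrayList(Iterables.transform(...)) call above.
    final ListenableFuture<List<List<Integer>>> futures = Futures.allAsList(
        Lists.newArrayList(
            Iterables.transform(
                partitions,
                new Function<List<Integer>, ListenableFuture<List<Integer>>>()
                {
                  @Override
                  public ListenableFuture<List<Integer>> apply(final List<Integer> input)
                  {
                    return exec.submit(
                        new Callable<List<Integer>>()
                        {
                          @Override
                          public List<Integer> call()
                          {
                            return input; // a real runner would compute its partial result here
                          }
                        }
                    );
                  }
                }
            )
        )
    );

    // Block for all partial results, then combine them in sorted order.
    final List<Integer> merged =
        Ordering.<Integer>natural().sortedCopy(Iterables.concat(futures.get()));
    System.out.println(merged); // [1, 2, 3, 4]
    exec.shutdown();
  }
}

Materializing the transform with Lists.newArrayList is what forces eager submission; Iterables.transform alone is lazy and would only submit each task as its future was first touched.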
Use of com.google.common.base.Function in project druid by druid-io.
From the class JavaScriptParser, the compile method:
private static Function<Object, Object> compile(String function)
{
  final ContextFactory contextFactory = ContextFactory.getGlobal();
  final Context context = contextFactory.enterContext();
  context.setOptimizationLevel(9);
  final ScriptableObject scope = context.initStandardObjects();
  final org.mozilla.javascript.Function fn = context.compileFunction(scope, function, "fn", 1, null);
  Context.exit();

  return new Function<Object, Object>()
  {
    @Override
    public Object apply(Object input)
    {
      // Ideally we would have a close() method to discard the context once it is no longer used.
      Context cx = Context.getCurrentContext();
      if (cx == null) {
        cx = contextFactory.enterContext();
      }
      final Object res = fn.call(cx, scope, scope, new Object[]{input});
      return res != null ? Context.toObject(res, scope) : null;
    }
  };
}
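Because apply() re-enters a Rhino Context whenever the calling thread has none, the returned Function can be used from any thread. A hedged usage sketch follows; the caller, the JavaScript snippet, and the input string are illustrative, not from the Druid source:

// Illustrative caller. The result comes back wrapped by Context.toObject,
// so convert it with a Rhino helper such as Context.toString before use.
Function<Object, Object> parser = compile("function(line) { return line.split(',')[0]; }");
Object result = parser.apply("a,b,c");
System.out.println(Context.toString(result)); // expected to print "a"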
Use of com.google.common.base.Function in project druid by druid-io.
From the class RowBasedGrouperHelper, the makeValueConvertFunctions method:
@SuppressWarnings("unchecked")
private static Function<Comparable, Comparable>[] makeValueConvertFunctions(final List<ValueType> valueTypes)
{
  final Function<Comparable, Comparable>[] functions = new Function[valueTypes.size()];
  for (int i = 0; i < functions.length; i++) {
    ValueType type = valueTypes.get(i);
    // Subquery post-aggs aren't added to the rowSignature (see rowSignatureFor() in GroupByQueryHelper) because
    // their types aren't known, so default to String handling.
    type = type == null ? ValueType.STRING : type;
    switch (type) {
      case STRING:
        functions[i] = new Function<Comparable, Comparable>()
        {
          @Override
          public Comparable apply(@Nullable Comparable input)
          {
            return input == null ? "" : input.toString();
          }
        };
        break;
      case LONG:
        functions[i] = new Function<Comparable, Comparable>()
        {
          @Override
          public Comparable apply(@Nullable Comparable input)
          {
            final Long val = DimensionHandlerUtils.convertObjectToLong(input);
            return val == null ? 0L : val;
          }
        };
        break;
      case FLOAT:
        functions[i] = new Function<Comparable, Comparable>()
        {
          @Override
          public Comparable apply(@Nullable Comparable input)
          {
            final Float val = DimensionHandlerUtils.convertObjectToFloat(input);
            return val == null ? 0.f : val;
          }
        };
        break;
      default:
        throw new IAE("invalid type: [%s]", type);
    }
  }
  return functions;
}
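The resulting array is applied positionally, one converter per grouping column: nulls become type-appropriate defaults ("" for strings, 0L for longs, 0.f for floats) and other values are coerced to the column's ValueType. A hypothetical caller might look like the sketch below; the types and row values are invented, and it assumes DimensionHandlerUtils parses numeric strings:

// Hypothetical caller: normalize one row of grouping-key values in place.
final List<ValueType> types = ImmutableList.of(ValueType.STRING, ValueType.LONG, ValueType.FLOAT);
final Function<Comparable, Comparable>[] converters = makeValueConvertFunctions(types);

final Comparable[] row = new Comparable[]{null, "42", "1.5"};
for (int i = 0; i < row.length; i++) {
  row[i] = converters[i].apply(row[i]);
}
// row should now be {"", 42L, 1.5f}.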
Use of com.google.common.base.Function in project druid by druid-io.
From the class SpillingGrouper, the iterator method:
@Override
public Iterator<Entry<KeyType>> iterator(final boolean sorted)
{
  final List<Iterator<Entry<KeyType>>> iterators = new ArrayList<>(1 + files.size());

  iterators.add(grouper.iterator(sorted));

  for (final File file : files) {
    final MappingIterator<Entry<KeyType>> fileIterator = read(file, keySerde.keyClazz());
    iterators.add(
        Iterators.transform(
            fileIterator,
            new Function<Entry<KeyType>, Entry<KeyType>>()
            {
              @Override
              public Entry<KeyType> apply(Entry<KeyType> entry)
              {
                final Object[] deserializedValues = new Object[entry.getValues().length];
                for (int i = 0; i < deserializedValues.length; i++) {
                  deserializedValues[i] = aggregatorFactories[i].deserialize(entry.getValues()[i]);
                  if (deserializedValues[i] instanceof Integer) {
                    // Hack to satisfy the groupBy unit tests; perhaps we could do better by adjusting Jackson config.
                    deserializedValues[i] = ((Integer) deserializedValues[i]).longValue();
                  }
                }
                return new Entry<>(entry.getKey(), deserializedValues);
              }
            }
        )
    );
    closeables.add(fileIterator);
  }

  return Groupers.mergeIterators(iterators, sorted ? keyObjComparator : null);
}
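Iterators.transform keeps the deserialization lazy, so spilled entries are converted one at a time as the merge pulls them. The merge itself, when sorted is true, is a comparator-driven k-way merge of already-sorted inputs; Guava exposes the same behavior directly via Iterators.mergeSorted, as in this toy, self-contained sketch (the class name MergeSketch and the integer inputs are invented):

import com.google.common.collect.Iterators;
import com.google.common.collect.Ordering;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class MergeSketch
{
  // Toy stand-in for merging per-spill-file iterators: each input iterator is
  // already sorted, so a comparator-driven merge yields globally sorted output.
  public static void main(String[] args)
  {
    final List<Iterator<Integer>> iterators = Arrays.asList(
        Arrays.asList(1, 4, 7).iterator(),
        Arrays.asList(2, 5, 8).iterator(),
        Arrays.asList(3, 6, 9).iterator()
    );
    final Iterator<Integer> merged = Iterators.mergeSorted(iterators, Ordering.<Integer>natural());
    while (merged.hasNext()) {
      System.out.print(merged.next() + " "); // 1 2 3 4 5 6 7 8 9
    }
  }
}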
Use of com.google.common.base.Function in project druid by druid-io.
From the class GroupByQueryHelper, the createIndexAccumulatorPair method:
public static <T> Pair<IncrementalIndex, Accumulator<IncrementalIndex, T>> createIndexAccumulatorPair(
    final GroupByQuery query,
    final GroupByQueryConfig config,
    StupidPool<ByteBuffer> bufferPool,
    final boolean combine
)
{
  final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);
  final Granularity gran = query.getGranularity();
  final long timeStart = query.getIntervals().get(0).getStartMillis();

  long granTimeStart = timeStart;
  if (!(Granularities.ALL.equals(gran))) {
    granTimeStart = gran.bucketStart(new DateTime(timeStart)).getMillis();
  }

  final List<AggregatorFactory> aggs;

  if (combine) {
    aggs = Lists.transform(
        query.getAggregatorSpecs(),
        new Function<AggregatorFactory, AggregatorFactory>()
        {
          @Override
          public AggregatorFactory apply(AggregatorFactory input)
          {
            return input.getCombiningFactory();
          }
        }
    );
  } else {
    aggs = query.getAggregatorSpecs();
  }

  final List<String> dimensions = Lists.transform(
      query.getDimensions(),
      new Function<DimensionSpec, String>()
      {
        @Override
        public String apply(DimensionSpec input)
        {
          return input.getOutputName();
        }
      }
  );

  final IncrementalIndex index;
  final boolean sortResults = query.getContextValue(CTX_KEY_SORT_RESULTS, true);

  // All groupBy dimensions are strings, for now.
  final List<DimensionSchema> dimensionSchemas = Lists.newArrayList();
  for (DimensionSpec dimension : query.getDimensions()) {
    dimensionSchemas.add(new StringDimensionSchema(dimension.getOutputName()));
  }

  final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
      .withDimensionsSpec(new DimensionsSpec(dimensionSchemas, null, null))
      .withMetrics(aggs.toArray(new AggregatorFactory[aggs.size()]))
      .withQueryGranularity(gran)
      .withMinTimestamp(granTimeStart)
      .build();

  if (query.getContextValue("useOffheap", false)) {
    index = new OffheapIncrementalIndex(
        indexSchema,
        false,
        true,
        sortResults,
        querySpecificConfig.getMaxResults(),
        bufferPool
    );
  } else {
    index = new OnheapIncrementalIndex(
        indexSchema,
        false,
        true,
        sortResults,
        querySpecificConfig.getMaxResults()
    );
  }

  Accumulator<IncrementalIndex, T> accumulator = new Accumulator<IncrementalIndex, T>()
  {
    @Override
    public IncrementalIndex accumulate(IncrementalIndex accumulated, T in)
    {
      if (in instanceof MapBasedRow) {
        try {
          MapBasedRow row = (MapBasedRow) in;
          accumulated.add(new MapBasedInputRow(row.getTimestamp(), dimensions, row.getEvent()));
        }
        catch (IndexSizeExceededException e) {
          throw new ResourceLimitExceededException(e.getMessage());
        }
      } else {
        throw new ISE("Unable to accumulate something of type [%s]", in.getClass());
      }
      return accumulated;
    }
  };

  return new Pair<>(index, accumulator);
}
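A hedged sketch of how the pair is meant to be consumed: fold a sequence of result rows through the accumulator, starting from the freshly built index. The variables query, config, bufferPool, and rows are assumed to be in scope, and Pair is assumed to expose its two halves as lhs and rhs, as elsewhere in Druid:

// Illustrative caller (not from the source). `rows` is a Sequence<Row> of
// groupBy results; accumulate() folds every row into the IncrementalIndex.
final Pair<IncrementalIndex, Accumulator<IncrementalIndex, Row>> pair =
    GroupByQueryHelper.createIndexAccumulatorPair(query, config, bufferPool, true);
final IncrementalIndex index = rows.accumulate(pair.lhs, pair.rhs);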