Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class GroupByQueryEngineV2, method process.
public static Sequence<Row> process(
    final GroupByQuery query,
    final StorageAdapter storageAdapter,
    final StupidPool<ByteBuffer> intermediateResultsBufferPool,
    final GroupByQueryConfig config
)
{
  if (storageAdapter == null) {
    throw new ISE("Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped.");
  }

  // The v2 engine processes exactly one interval per segment scan.
  final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals();
  if (intervals.size() != 1) {
    throw new IAE("Should only have one interval, got[%s]", intervals);
  }

  final Sequence<Cursor> cursors = storageAdapter.makeCursors(
      Filters.toFilter(query.getDimFilter()), intervals.get(0), query.getVirtualColumns(), query.getGranularity(), false
  );
  final ResourceHolder<ByteBuffer> bufferHolder = intermediateResultsBufferPool.take();

  // Optional fixed timestamp passed down from GroupByStrategyV2 through the query context.
  final String fudgeTimestampString = Strings.emptyToNull(query.getContextValue(GroupByStrategyV2.CTX_KEY_FUDGE_TIMESTAMP, ""));
  final DateTime fudgeTimestamp = fudgeTimestampString == null ? null : new DateTime(Long.parseLong(fudgeTimestampString));

  return Sequences.concat(Sequences.withBaggage(
      Sequences.map(cursors, new Function<Cursor, Sequence<Row>>()
      {
        @Override
        public Sequence<Row> apply(final Cursor cursor)
        {
          return new BaseSequence<>(new BaseSequence.IteratorMaker<Row, GroupByEngineIterator>()
          {
            @Override
            public GroupByEngineIterator make()
            {
              ColumnSelectorPlus<GroupByColumnSelectorStrategy>[] selectorPlus =
                  DimensionHandlerUtils.createColumnSelectorPluses(STRATEGY_FACTORY, query.getDimensions(), cursor);
              return new GroupByEngineIterator(query, config, cursor, bufferHolder.get(), fudgeTimestamp, createGroupBySelectorPlus(selectorPlus));
            }

            @Override
            public void cleanup(GroupByEngineIterator iterFromMake)
            {
              iterFromMake.close();
            }
          });
        }
      }),
      new Closeable()
      {
        @Override
        public void close() throws IOException
        {
          // Release the intermediate buffer back to the pool once the result sequence has been fully consumed.
          CloseQuietly.close(bufferHolder);
        }
      }
  ));
}
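IAE is Druid's convenience subclass of IllegalArgumentException that takes a format string plus arguments, which is why the throw site above passes the offending interval list as a separate parameter. The following minimal sketch isolates the same single-interval guard; the IntervalChecks class and validateSingleInterval helper are hypothetical names for illustration, not part of Druid.

import io.druid.java.util.common.IAE;
import org.joda.time.Interval;

import java.util.List;

public class IntervalChecks
{
  // Hypothetical helper mirroring the guard in GroupByQueryEngineV2.process: the v2 engine
  // expects the segment spec to have been split into exactly one interval per segment scan.
  public static Interval validateSingleInterval(List<Interval> intervals)
  {
    if (intervals.size() != 1) {
      // IAE builds its message from the format string and arguments, so the bad value
      // ends up directly in the exception text.
      throw new IAE("Should only have one interval, got[%s]", intervals);
    }
    return intervals.get(0);
  }
}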
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class RowBasedGrouperHelper, method makeValueConvertFunctions.
@SuppressWarnings("unchecked")
private static Function<Comparable, Comparable>[] makeValueConvertFunctions(final List<ValueType> valueTypes)
{
  final Function<Comparable, Comparable>[] functions = new Function[valueTypes.size()];
  for (int i = 0; i < functions.length; i++) {
    ValueType type = valueTypes.get(i);
    // Subquery post-aggs aren't added to the rowSignature (see rowSignatureFor() in GroupByQueryHelper) because
    // their types aren't known, so default to String handling.
    type = type == null ? ValueType.STRING : type;
    switch (type) {
      case STRING:
        functions[i] = new Function<Comparable, Comparable>()
        {
          @Override
          public Comparable apply(@Nullable Comparable input)
          {
            return input == null ? "" : input.toString();
          }
        };
        break;
      case LONG:
        functions[i] = new Function<Comparable, Comparable>()
        {
          @Override
          public Comparable apply(@Nullable Comparable input)
          {
            final Long val = DimensionHandlerUtils.convertObjectToLong(input);
            return val == null ? 0L : val;
          }
        };
        break;
      case FLOAT:
        functions[i] = new Function<Comparable, Comparable>()
        {
          @Override
          public Comparable apply(@Nullable Comparable input)
          {
            final Float val = DimensionHandlerUtils.convertObjectToFloat(input);
            return val == null ? 0.f : val;
          }
        };
        break;
      default:
        throw new IAE("invalid type: [%s]", type);
    }
  }
  return functions;
}
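For context, the caller applies one converter per grouping column to each incoming row key, coercing possibly-null values into non-null defaults ("" for STRING, 0L for LONG, 0.f for FLOAT) as shown in the cases above. The small driver below is a hypothetical illustration of that usage; the ValueConvertSketch class and convertRowKey helper are made up, not part of Druid.

import com.google.common.base.Function;

public class ValueConvertSketch
{
  // Hypothetical helper: apply one converter per grouping column to a raw row key.
  // Each converter coerces a possibly-null Comparable into the column's expected type.
  static Comparable[] convertRowKey(Function<Comparable, Comparable>[] valueConvertFns, Comparable[] rawKey)
  {
    final Comparable[] converted = new Comparable[rawKey.length];
    for (int i = 0; i < rawKey.length; i++) {
      converted[i] = valueConvertFns[i].apply(rawKey[i]);
    }
    return converted;
  }
}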
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class GroupByStrategyV1, method processSubqueryResult.
@Override
public Sequence<Row> processSubqueryResult(
    GroupByQuery subquery, GroupByQuery query, GroupByQueryResource resource, Sequence<Row> subqueryResult
)
{
  final Set<AggregatorFactory> aggs = Sets.newHashSet();

  // Nested group-bys work by first running the inner query and then materializing the results in an incremental
  // index which the outer query is then run against. To build the incremental index, we use the fieldNames from
  // the aggregators for the outer query to define the column names so that the index will match the query. If
  // there are multiple types of aggregators in the outer query referencing the same fieldName, we will try to build
  // multiple columns of the same name using different aggregator types and will fail. Here, we permit multiple
  // aggregators of the same type referencing the same fieldName (and skip creating identical columns for the
  // subsequent ones) and return an error if the aggregator types are different.
  final Set<String> dimensionNames = Sets.newHashSet();
  for (DimensionSpec dimension : subquery.getDimensions()) {
    dimensionNames.add(dimension.getOutputName());
  }

  for (AggregatorFactory aggregatorFactory : query.getAggregatorSpecs()) {
    for (final AggregatorFactory transferAgg : aggregatorFactory.getRequiredColumns()) {
      if (dimensionNames.contains(transferAgg.getName())) {
        // The required column already exists as a dimension of the inner query, so skip adding a
        // transfer aggregator for it.
        continue;
      }
      if (Iterables.any(aggs, new Predicate<AggregatorFactory>()
      {
        @Override
        public boolean apply(AggregatorFactory agg)
        {
          return agg.getName().equals(transferAgg.getName()) && !agg.equals(transferAgg);
        }
      })) {
        throw new IAE(
            "Inner aggregator can currently only be referenced by a single type of outer aggregator" + " for '%s'",
            transferAgg.getName()
        );
      }
      aggs.add(transferAgg);
    }
  }

  // We need the inner incremental index to have all the columns required by the outer query
  final GroupByQuery innerQuery = new GroupByQuery.Builder(subquery)
      .setAggregatorSpecs(Lists.newArrayList(aggs))
      .setInterval(subquery.getIntervals())
      .setPostAggregatorSpecs(Lists.<PostAggregator>newArrayList())
      .build();
  final GroupByQuery outerQuery = new GroupByQuery.Builder(query)
      .setLimitSpec(query.getLimitSpec().merge(subquery.getLimitSpec()))
      .build();

  final IncrementalIndex innerQueryResultIndex = GroupByQueryHelper.makeIncrementalIndex(
      innerQuery.withOverriddenContext(ImmutableMap.<String, Object>of(GroupByQueryHelper.CTX_KEY_SORT_RESULTS, true)),
      configSupplier.get(), bufferPool, subqueryResult, false
  );

  // Outer query might have multiple intervals, but they are expected to be non-overlapping and sorted which
  // is ensured by QuerySegmentSpec.
  // GroupByQueryEngine can only process one interval at a time, so we need to call it once per interval
  // and concatenate the results.
  final IncrementalIndex outerQueryResultIndex = GroupByQueryHelper.makeIncrementalIndex(
      outerQuery, configSupplier.get(), bufferPool,
      Sequences.concat(Sequences.map(Sequences.simple(outerQuery.getIntervals()), new Function<Interval, Sequence<Row>>()
      {
        @Override
        public Sequence<Row> apply(Interval interval)
        {
          return process(
              outerQuery.withQuerySegmentSpec(new MultipleIntervalSegmentSpec(ImmutableList.of(interval))),
              new IncrementalIndexStorageAdapter(innerQueryResultIndex)
          );
        }
      })),
      true
  );

  innerQueryResultIndex.close();
  return Sequences.withBaggage(
      outerQuery.applyLimit(GroupByQueryHelper.postAggregate(query, outerQueryResultIndex)),
      outerQueryResultIndex
  );
}
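To illustrate when the IAE above fires: if an outer query referenced the same inner column with two different aggregator types, the two transfer aggregators would share a name without being equal. The sketch below is a schematic reconstruction of that check in isolation; the class name, aggregator names, and column names are made up, and it is not a runnable test against a real subquery.

import io.druid.java.util.common.IAE;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleSumAggregatorFactory;
import io.druid.query.aggregation.LongSumAggregatorFactory;

public class TransferAggConflictSketch
{
  public static void main(String[] args)
  {
    // Two outer aggregators that both require the inner column "innerCount", but with different types.
    // (These names are made up for illustration.)
    AggregatorFactory alreadyAdded = new LongSumAggregatorFactory("innerCount", "innerCount");
    AggregatorFactory incoming = new DoubleSumAggregatorFactory("innerCount", "innerCount");

    // Same predicate as in processSubqueryResult: same name but not equal means two different
    // aggregator types would try to create the same column in the incremental index.
    if (alreadyAdded.getName().equals(incoming.getName()) && !alreadyAdded.equals(incoming)) {
      throw new IAE(
          "Inner aggregator can currently only be referenced by a single type of outer aggregator for '%s'",
          incoming.getName()
      );
    }
  }
}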
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class HyperUniquesAggregatorFactory, method factorize.
@Override
public Aggregator factorize(ColumnSelectorFactory metricFactory)
{
  ObjectColumnSelector selector = metricFactory.makeObjectColumnSelector(fieldName);
  if (selector == null) {
    return NoopAggregator.instance();
  }
  final Class classOfObject = selector.classOfObject();
  if (classOfObject.equals(Object.class) || HyperLogLogCollector.class.isAssignableFrom(classOfObject)) {
    return new HyperUniquesAggregator(selector);
  }
  throw new IAE("Incompatible type for metric[%s], expected a HyperUnique, got a %s", fieldName, classOfObject);
}
Use of io.druid.java.util.common.IAE in project druid by druid-io.
The class HyperUniquesAggregatorFactory, method factorizeBuffered.
@Override
public BufferAggregator factorizeBuffered(ColumnSelectorFactory metricFactory)
{
  ObjectColumnSelector selector = metricFactory.makeObjectColumnSelector(fieldName);
  if (selector == null) {
    return NoopBufferAggregator.instance();
  }
  final Class classOfObject = selector.classOfObject();
  if (classOfObject.equals(Object.class) || HyperLogLogCollector.class.isAssignableFrom(classOfObject)) {
    return new HyperUniquesBufferAggregator(selector);
  }
  throw new IAE("Incompatible type for metric[%s], expected a HyperUnique, got a %s", fieldName, classOfObject);
}
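Both factorize methods share the same column type guard: the selector's declared class must be the unknown Object.class or a HyperLogLogCollector subtype, otherwise the metric was not stored as a HyperUnique complex column and the IAE is thrown. The helper below is a hypothetical extraction of that shared pattern; the HyperUniquesTypeCheck class and checkSelectorType method are illustrative names, not part of Druid.

import io.druid.hll.HyperLogLogCollector;
import io.druid.java.util.common.IAE;
import io.druid.segment.ObjectColumnSelector;

public class HyperUniquesTypeCheck
{
  // Hypothetical extraction of the guard shared by factorize() and factorizeBuffered():
  // accept columns whose declared class is unknown (Object.class) or a HyperLogLogCollector,
  // reject anything else (e.g. a plain long column aggregated as "hyperUnique").
  static void checkSelectorType(String fieldName, ObjectColumnSelector selector)
  {
    final Class classOfObject = selector.classOfObject();
    if (!classOfObject.equals(Object.class) && !HyperLogLogCollector.class.isAssignableFrom(classOfObject)) {
      throw new IAE("Incompatible type for metric[%s], expected a HyperUnique, got a %s", fieldName, classOfObject);
    }
  }
}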