Use of io.druid.query.select.EventHolder in project hive by apache.
The class DruidSelectQueryRecordReader, method next.
@Override
public boolean next(NullWritable key, DruidWritable value) throws IOException {
  if (nextKeyValue()) {
    // Update value
    value.getValue().clear();
    EventHolder e = values.next();
    value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
    value.getValue().putAll(e.getEvent());
    return true;
  }
  return false;
}
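Here next reuses the caller-supplied DruidWritable and relies on nextKeyValue() to refill the values iterator from the paginated Select response. A minimal sketch of that contract, assuming a hypothetical fetchNextPage() helper that is not part of the original class:

// Sketch only: how nextKeyValue() could refill the EventHolder iterator.
// fetchNextPage() is an assumed helper that reissues the Select query with
// the paging identifiers from the previous SelectResultValue.
protected boolean nextKeyValue() throws IOException {
  if (values.hasNext()) {
    return true; // events remain on the current page
  }
  List<EventHolder> nextPage = fetchNextPage(); // assumption: empty list when exhausted
  values = nextPage.iterator();
  return values.hasNext();
}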
Use of io.druid.query.select.EventHolder in project druid by druid-io.
The class CachingClusteredClientTest, method makeSelectResults.
private Iterable<Result<SelectResultValue>> makeSelectResults(Set<String> dimensions, Set<String> metrics, Object... objects) {
  List<Result<SelectResultValue>> retVal = Lists.newArrayList();
  int index = 0;
  while (index < objects.length) {
    DateTime timestamp = (DateTime) objects[index++];
    List<EventHolder> values = Lists.newArrayList();
    while (index < objects.length && !(objects[index] instanceof DateTime)) {
      values.add(new EventHolder(null, 0, (Map) objects[index++]));
    }
    retVal.add(new Result<>(timestamp, new SelectResultValue(null, dimensions, metrics, values)));
  }
  return retVal;
}
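The varargs are read positionally: each DateTime opens a new result, and every following Map becomes one EventHolder under that timestamp until the next DateTime. A hedged usage sketch, assuming Guava's ImmutableSet/ImmutableMap (the test already uses Guava collections):

// Hypothetical call: two events at the first timestamp, one at the second.
Iterable<Result<SelectResultValue>> results = makeSelectResults(
    ImmutableSet.of("dim1"),
    ImmutableSet.of("met1"),
    new DateTime("2011-01-01"), ImmutableMap.of("dim1", "a", "met1", 1),
                                ImmutableMap.of("dim1", "b", "met1", 2),
    new DateTime("2011-01-02"), ImmutableMap.of("dim1", "c", "met1", 3)
);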
Use of io.druid.query.select.EventHolder in project druid by druid-io.
The class SelectBenchmark, method queryMultiQueryableIndex.
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndex(Blackhole blackhole) throws Exception {
  SelectQuery queryCopy = query.withPagingSpec(PagingSpec.newSpec(pagingThreshold));
  String segmentName;
  List<QueryRunner<Result<SelectResultValue>>> singleSegmentRunners = Lists.newArrayList();
  QueryToolChest toolChest = factory.getToolchest();
  for (int i = 0; i < numSegments; i++) {
    segmentName = "qIndex" + i;
    QueryRunner<Result<SelectResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory, segmentName, new QueryableIndexSegment(segmentName, qIndexes.get(i)));
    singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
  }
  QueryRunner theRunner = toolChest.postMergeQueryDecoration(new FinalizeResultsQueryRunner<>(toolChest.mergeResults(factory.mergeRunners(executorService, singleSegmentRunners)), toolChest));
  boolean done = false;
  while (!done) {
    Sequence<Result<SelectResultValue>> queryResult = theRunner.run(queryCopy, Maps.<String, Object>newHashMap());
    List<Result<SelectResultValue>> results = Sequences.toList(queryResult, Lists.<Result<SelectResultValue>>newArrayList());
    SelectResultValue result = results.get(0).getValue();
    if (result.getEvents().size() == 0) {
      done = true;
    } else {
      for (EventHolder eh : result.getEvents()) {
        blackhole.consume(eh);
      }
      queryCopy = incrementQueryPagination(queryCopy, result);
    }
  }
}
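Paging ends when a page comes back with no events. incrementQueryPagination is defined elsewhere in the benchmark; a plausible sketch of what it must do, assuming PagingSpec's (identifiers, threshold) constructor and that each segment offset is advanced past the last event read:

// Sketch only: advance the paging spec using the previous page's identifiers.
private SelectQuery incrementQueryPagination(SelectQuery query, SelectResultValue prevResult) {
  Map<String, Integer> next = new HashMap<>();
  for (Map.Entry<String, Integer> id : prevResult.getPagingIdentifiers().entrySet()) {
    next.put(id.getKey(), id.getValue() + 1); // resume after the last returned offset
  }
  return query.withPagingSpec(new PagingSpec(next, pagingThreshold));
}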
Use of io.druid.query.select.EventHolder in project hive by apache.
The class DruidSelectQueryRecordReader, method getCurrentValue.
@Override
public DruidWritable getCurrentValue() throws IOException, InterruptedException {
  // Create new value
  DruidWritable value = new DruidWritable();
  EventHolder e = values.next();
  value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
  value.getValue().putAll(e.getEvent());
  return value;
}
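Unlike next(key, value) above, this variant allocates a fresh DruidWritable per event rather than clearing and reusing one, and it advances the values iterator itself. A hedged consumption sketch, assuming a configured reader instance:

// Hypothetical consumption loop for the record reader.
while (reader.nextKeyValue()) {
  DruidWritable row = reader.getCurrentValue(); // one new DruidWritable per event
  Object millis = row.getValue().get(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
  // ... remaining entries in row.getValue() are the event's columns ...
}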
Use of io.druid.query.select.EventHolder in project druid by druid-io.
The class QueryMaker, method executeSelect.
private Sequence<Object[]> executeSelect(final DruidQueryBuilder queryBuilder, final SelectQuery baseQuery) {
  Preconditions.checkState(queryBuilder.getGrouping() == null, "grouping must be null");
  final List<RelDataTypeField> fieldList = queryBuilder.getRowType().getFieldList();
  final Integer limit = queryBuilder.getLimitSpec() != null ? queryBuilder.getLimitSpec().getLimit() : null;
  // Select is paginated, we need to make multiple queries.
  final Sequence<Sequence<Object[]>> sequenceOfSequences = Sequences.simple(new Iterable<Sequence<Object[]>>() {
    @Override
    public Iterator<Sequence<Object[]>> iterator() {
      final AtomicBoolean morePages = new AtomicBoolean(true);
      final AtomicReference<Map<String, Integer>> pagingIdentifiers = new AtomicReference<>();
      final AtomicLong rowsRead = new AtomicLong();
      // Each Sequence<Object[]> is one page.
      return new Iterator<Sequence<Object[]>>() {
        @Override
        public boolean hasNext() {
          return morePages.get();
        }

        @Override
        public Sequence<Object[]> next() {
          final SelectQuery queryWithPagination = baseQuery.withPagingSpec(new PagingSpec(pagingIdentifiers.get(), plannerContext.getPlannerConfig().getSelectThreshold(), true));
          Hook.QUERY_PLAN.run(queryWithPagination);
          morePages.set(false);
          final AtomicBoolean gotResult = new AtomicBoolean();
          return Sequences.concat(Sequences.map(queryWithPagination.run(walker, Maps.<String, Object>newHashMap()), new Function<Result<SelectResultValue>, Sequence<Object[]>>() {
            @Override
            public Sequence<Object[]> apply(final Result<SelectResultValue> result) {
              if (!gotResult.compareAndSet(false, true)) {
                throw new ISE("WTF?! Expected single result from Select query but got multiple!");
              }
              pagingIdentifiers.set(result.getValue().getPagingIdentifiers());
              final List<Object[]> retVals = new ArrayList<>();
              for (EventHolder holder : result.getValue().getEvents()) {
                morePages.set(true);
                final Map<String, Object> map = holder.getEvent();
                final Object[] retVal = new Object[fieldList.size()];
                for (RelDataTypeField field : fieldList) {
                  final String outputName = queryBuilder.getRowOrder().get(field.getIndex());
                  if (outputName.equals(Column.TIME_COLUMN_NAME)) {
                    retVal[field.getIndex()] = coerce(holder.getTimestamp().getMillis(), field.getType().getSqlTypeName());
                  } else {
                    retVal[field.getIndex()] = coerce(map.get(outputName), field.getType().getSqlTypeName());
                  }
                }
                if (limit == null || rowsRead.incrementAndGet() <= limit) {
                  retVals.add(retVal);
                } else {
                  morePages.set(false);
                  return Sequences.simple(retVals);
                }
              }
              return Sequences.simple(retVals);
            }
          }));
        }

        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }
      };
    }
  });
  return Sequences.concat(sequenceOfSequences);
}
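Each inner Sequence is one Select page; concatenating them yields a single row stream, with morePages flipped back to true whenever a page produced events. A hedged sketch of consuming the result, using the same Sequences.toList helper seen in the benchmark above:

// Hypothetical caller: materialize the paginated row stream.
Sequence<Object[]> rows = executeSelect(queryBuilder, selectQuery);
List<Object[]> materialized = Sequences.toList(rows, Lists.<Object[]>newArrayList());
for (Object[] row : materialized) {
  // row is ordered per queryBuilder.getRowType().getFieldList()
}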