Use of io.druid.query.aggregation.Aggregator in project druid by druid-io.
From class OnheapIncrementalIndex, method doAggregate:
private void doAggregate(AggregatorFactory[] metrics, Aggregator[] aggs, ThreadLocal<InputRow> rowContainer, InputRow row, boolean reportParseExceptions) {
  rowContainer.set(row);
  for (int i = 0; i < aggs.length; i++) {
    final Aggregator agg = aggs[i];
    synchronized (agg) {
      try {
        agg.aggregate();
      } catch (ParseException e) {
        // "aggregate" can throw ParseExceptions if a selector expects something but gets something else.
        if (reportParseExceptions) {
          throw new ParseException(e, "Encountered parse error for aggregator[%s]", metrics[i].getName());
        } else {
          log.debug(e, "Encountered parse error, skipping aggregator[%s].", metrics[i].getName());
        }
      }
    }
  }
  rowContainer.set(null);
}
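The pattern here (synchronize on each aggregator, then either rethrow or downgrade a ParseException to a debug log depending on reportParseExceptions) can be exercised in isolation. A minimal, self-contained sketch, where ParseFailure and SketchAggregator are hypothetical stand-ins for Druid's ParseException and Aggregator:

// Minimal sketch of the tolerant-aggregation loop above.
// ParseFailure and SketchAggregator are hypothetical stand-ins, not Druid types.
public class TolerantAggregationSketch {
  static class ParseFailure extends RuntimeException {
    ParseFailure(String message) { super(message); }
  }

  interface SketchAggregator {
    void aggregate();   // may throw ParseFailure, like Aggregator.aggregate()
  }

  static void doAggregate(SketchAggregator[] aggs, boolean reportParseExceptions) {
    for (int i = 0; i < aggs.length; i++) {
      final SketchAggregator agg = aggs[i];
      synchronized (agg) {                     // same per-aggregator lock as above
        try {
          agg.aggregate();
        } catch (ParseFailure e) {
          if (reportParseExceptions) {
            throw e;                           // strict mode: surface the failure
          }
          System.err.println("skipping aggregator[" + i + "]: " + e.getMessage());
        }
      }
    }
  }

  public static void main(String[] args) {
    SketchAggregator good = () -> { };
    SketchAggregator bad = () -> { throw new ParseFailure("expected long, got string"); };
    doAggregate(new SketchAggregator[]{good, bad}, false);    // logs and keeps going
    try {
      doAggregate(new SketchAggregator[]{good, bad}, true);   // strict mode rethrows
    } catch (ParseFailure expected) {
      System.err.println("strict mode surfaced: " + expected.getMessage());
    }
  }
}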
Use of io.druid.query.aggregation.Aggregator in project druid by druid-io.
From class OnheapIncrementalIndex, method addToFacts:
@Override
protected Integer addToFacts(AggregatorFactory[] metrics, boolean deserializeComplexMetrics, boolean reportParseExceptions, InputRow row, AtomicInteger numEntries, TimeAndDims key, ThreadLocal<InputRow> rowContainer, Supplier<InputRow> rowSupplier) throws IndexSizeExceededException {
  final Integer priorIndex = facts.getPriorIndex(key);
  Aggregator[] aggs;
  if (null != priorIndex) {
    aggs = concurrentGet(priorIndex);
    doAggregate(metrics, aggs, rowContainer, row, reportParseExceptions);
  } else {
    aggs = new Aggregator[metrics.length];
    factorizeAggs(metrics, aggs, rowContainer, row);
    doAggregate(metrics, aggs, rowContainer, row, reportParseExceptions);
    final Integer rowIndex = indexIncrement.getAndIncrement();
    concurrentSet(rowIndex, aggs);
    // Last ditch sanity checks
    if (numEntries.get() >= maxRowCount && facts.getPriorIndex(key) == null) {
      throw new IndexSizeExceededException("Maximum number of rows [%d] reached", maxRowCount);
    }
    final Integer prev = facts.putIfAbsent(key, rowIndex);
    if (null == prev) {
      numEntries.incrementAndGet();
    } else {
      // We lost a race
      aggs = concurrentGet(prev);
      doAggregate(metrics, aggs, rowContainer, row, reportParseExceptions);
      // Free up the misfire
      concurrentRemove(rowIndex);
      // This is expected to occur ~80% of the time in the worst scenarios
    }
  }
  return numEntries.get();
}
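The race handled here (two threads creating aggregators for the same TimeAndDims key, with the loser aggregating into the winner's slot and discarding its own) is the standard putIfAbsent idiom. A minimal sketch of the same control flow, assuming a plain ConcurrentHashMap keyed by String in place of Druid's facts table:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the putIfAbsent race handling in addToFacts; the map and key type
// are simplified stand-ins for facts/TimeAndDims.
public class PutIfAbsentRaceSketch {
  private final ConcurrentHashMap<String, Integer> facts = new ConcurrentHashMap<>();
  private final AtomicInteger indexIncrement = new AtomicInteger();
  private final AtomicInteger numEntries = new AtomicInteger();

  int addToFacts(String key) {
    Integer priorIndex = facts.get(key);
    if (priorIndex != null) {
      return priorIndex;                  // fast path: a row slot already exists
    }
    int rowIndex = indexIncrement.getAndIncrement();
    Integer prev = facts.putIfAbsent(key, rowIndex);
    if (prev == null) {
      numEntries.incrementAndGet();       // we won: our new slot is live
      return rowIndex;
    }
    // We lost the race: use the winner's slot and abandon ours,
    // playing the role of concurrentRemove(rowIndex) above.
    return prev;
  }

  public static void main(String[] args) {
    PutIfAbsentRaceSketch sketch = new PutIfAbsentRaceSketch();
    System.out.println(sketch.addToFacts("k"));   // 0: slot created
    System.out.println(sketch.addToFacts("k"));   // 0 again: prior index reused
  }
}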
Use of io.druid.query.aggregation.Aggregator in project druid by druid-io.
From class FinalizingFieldAccessPostAggregatorTest, method testComputeWithoutFinalizing:
@Test(expected = UnsupportedOperationException.class)
public void testComputeWithoutFinalizing() {
  String aggName = "rows";
  Aggregator agg = new CountAggregator();
  agg.aggregate();
  agg.aggregate();
  agg.aggregate();
  Map<String, Object> metricValues = Maps.newHashMap();
  metricValues.put(aggName, agg.get());
  FinalizingFieldAccessPostAggregator postAgg = new FinalizingFieldAccessPostAggregator("final_rows", aggName);
  Assert.assertEquals(new Long(3L), postAgg.compute(metricValues));
}
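The test drives the Aggregator contract directly: three aggregate() calls followed by get() should yield 3, and compute() is then expected to throw UnsupportedOperationException, evidently because the stored value was never finalized for the post-aggregator. A minimal count-style stand-in for that aggregate()/get() contract (hypothetical, not Druid's CountAggregator, though the test shows CountAggregator behaves the same way here):

// Hypothetical count-style aggregator with the aggregate()/get() contract
// the test relies on.
public class CountSketch {
  private long count = 0;

  public void aggregate() {
    count++;                 // a count ignores the input row entirely
  }

  public Object get() {
    return count;
  }

  public static void main(String[] args) {
    CountSketch agg = new CountSketch();
    agg.aggregate();
    agg.aggregate();
    agg.aggregate();
    System.out.println(agg.get());   // 3, matching the test's expectation
  }
}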
Use of io.druid.query.aggregation.Aggregator in project druid by druid-io.
From class StringArrayWritable, method toBytes:
public static final byte[] toBytes(final InputRow row, AggregatorFactory[] aggs, boolean reportParseExceptions) {
  try {
    ByteArrayDataOutput out = ByteStreams.newDataOutput();
    // write timestamp
    out.writeLong(row.getTimestampFromEpoch());
    // write all dimensions (null check must precede the size() call)
    List<String> dimList = row.getDimensions();
    WritableUtils.writeVInt(out, dimList == null ? 0 : dimList.size());
    if (dimList != null) {
      for (String dim : dimList) {
        List<String> dimValues = row.getDimension(dim);
        writeString(dim, out);
        writeStringArray(dimValues, out);
      }
    }
    // write all metrics
    Supplier<InputRow> supplier = new Supplier<InputRow>() {
      @Override
      public InputRow get() {
        return row;
      }
    };
    WritableUtils.writeVInt(out, aggs.length);
    for (AggregatorFactory aggFactory : aggs) {
      String k = aggFactory.getName();
      writeString(k, out);
      Aggregator agg = aggFactory.factorize(IncrementalIndex.makeColumnSelectorFactory(VirtualColumns.EMPTY, aggFactory, supplier, true));
      try {
        agg.aggregate();
      } catch (ParseException e) {
        // "aggregate" can throw ParseExceptions if a selector expects something but gets something else.
        if (reportParseExceptions) {
          throw new ParseException(e, "Encountered parse error for aggregator[%s]", k);
        }
        log.debug(e, "Encountered parse error, skipping aggregator[%s].", k);
      }
      String t = aggFactory.getTypeName();
      if (t.equals("float")) {
        out.writeFloat(agg.getFloat());
      } else if (t.equals("long")) {
        WritableUtils.writeVLong(out, agg.getLong());
      } else {
        // it's a complex metric
        Object val = agg.get();
        ComplexMetricSerde serde = getComplexMetricSerde(t);
        writeBytes(serde.toBytes(val), out);
      }
    }
    return out.toByteArray();
  } catch (IOException ex) {
    throw Throwables.propagate(ex);
  }
}
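The resulting byte layout is: timestamp (long), vint dimension count, per-dimension name plus value array, vint aggregator count, then per-aggregator name plus a float, vlong, or complex-serde payload. A sketch of a reader for the primitive-only case follows; the writeString/writeStringArray encodings are not shown above, so the readString helper here assumes a vint length prefix followed by UTF-8 bytes, which is purely an assumption:

import java.io.DataInput;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import com.google.common.io.ByteArrayDataInput;
import com.google.common.io.ByteStreams;
import org.apache.hadoop.io.WritableUtils;

// Sketch of a reader mirroring toBytes() for rows whose metrics are all
// "float" or "long"; complex metrics are omitted.
public class RowBytesReaderSketch {
  public static void dump(byte[] bytes, String[] metricTypes) throws IOException {
    ByteArrayDataInput in = ByteStreams.newDataInput(bytes);
    System.out.println("timestamp = " + in.readLong());   // written first by toBytes
    int dimCount = WritableUtils.readVInt(in);
    for (int i = 0; i < dimCount; i++) {
      String dim = readString(in);                        // dimension name...
      int valueCount = WritableUtils.readVInt(in);        // ...then its values
      for (int j = 0; j < valueCount; j++) {
        System.out.println(dim + " -> " + readString(in));
      }
    }
    int aggCount = WritableUtils.readVInt(in);
    for (int i = 0; i < aggCount; i++) {
      String name = readString(in);
      if ("float".equals(metricTypes[i])) {
        System.out.println(name + " = " + in.readFloat());
      } else {
        System.out.println(name + " = " + WritableUtils.readVLong(in));
      }
    }
  }

  // Hypothetical inverse of the (unshown) writeString helper: assumes a
  // vint-length prefix followed by UTF-8 bytes.
  private static String readString(DataInput in) throws IOException {
    byte[] buf = new byte[WritableUtils.readVInt(in)];
    in.readFully(buf);
    return new String(buf, StandardCharsets.UTF_8);
  }
}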
Use of io.druid.query.aggregation.Aggregator in project druid by druid-io.
From class IncrementalIndexTest, method testgetDimensions:
@Test
public void testgetDimensions() {
  final IncrementalIndex<Aggregator> incrementalIndex = new OnheapIncrementalIndex(
      new IncrementalIndexSchema.Builder()
          .withQueryGranularity(Granularities.NONE)
          .withMetrics(new AggregatorFactory[]{new CountAggregatorFactory("count")})
          .withDimensionsSpec(new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList("dim0", "dim1")), null, null))
          .build(),
      true,
      1000000
  );
  closer.closeLater(incrementalIndex);
  Assert.assertEquals(Arrays.asList("dim0", "dim1"), incrementalIndex.getDimensionNames());
}
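getDimensionNames() reflects the DimensionsSpec from the schema, so the names are available before any rows are ingested. A hypothetical follow-up test (not in the source), assuming MapBasedInputRow and IncrementalIndex.add(InputRow) from the same codebase and the index hoisted to where the test can reuse it:

// Hypothetical follow-up test; assumes io.druid.data.input.MapBasedInputRow
// and IncrementalIndex.add(InputRow), with incrementalIndex built as above.
@Test
public void testDimensionsAfterAdd() throws Exception {
  incrementalIndex.add(new MapBasedInputRow(
      System.currentTimeMillis(),
      Arrays.asList("dim0", "dim1"),
      ImmutableMap.<String, Object>of("dim0", "a", "dim1", "b")));
  Assert.assertEquals(Arrays.asList("dim0", "dim1"), incrementalIndex.getDimensionNames());
}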