Example usage of org.apache.druid.java.util.common.IAE in the druid-io/druid project: class InputRowSerde, method toBytes.
/**
 * Serializes one InputRow into the intermediate byte format used during indexing.
 * Layout, in write order: timestamp (long), vint dimension count, then per dimension its
 * name and type-specific value, then vint aggregator count, then per aggregator its name,
 * a null-marker byte, and (when not null) the aggregated value.
 *
 * Parse failures during dimension serialization or aggregation are collected as messages
 * rather than aborting the row; they are returned inside the SerializeResult.
 */
public static SerializeResult toBytes(final Map<String, IndexSerdeTypeHelper> typeHelperMap, final InputRow row, AggregatorFactory[] aggs) {
try {
List<String> parseExceptionMessages = new ArrayList<>();
ByteArrayDataOutput out = ByteStreams.newDataOutput();
// write timestamp
out.writeLong(row.getTimestampFromEpoch());
// writing all dimensions
List<String> dimList = row.getDimensions();
WritableUtils.writeVInt(out, dimList.size());
for (String dim : dimList) {
IndexSerdeTypeHelper typeHelper = typeHelperMap.get(dim);
// Dimensions without an explicit type helper fall back to string serialization.
if (typeHelper == null) {
typeHelper = STRING_HELPER;
}
writeString(dim, out);
try {
typeHelper.serialize(out, row.getRaw(dim));
} catch (ParseException pe) {
// Collect instead of rethrowing so one bad dimension does not drop the whole row.
parseExceptionMessages.add(pe.getMessage());
}
}
// writing all metrics
Supplier<InputRow> supplier = () -> row;
WritableUtils.writeVInt(out, aggs.length);
for (AggregatorFactory aggFactory : aggs) {
String k = aggFactory.getName();
writeString(k, out);
// Aggregator is closed per metric; its selector factory is backed by just this one row.
try (Aggregator agg = aggFactory.factorize(IncrementalIndex.makeColumnSelectorFactory(VirtualColumns.EMPTY, aggFactory, supplier, true))) {
try {
agg.aggregate();
} catch (ParseException e) {
// "aggregate" can throw ParseExceptions if a selector expects something but gets something else.
log.debug(e, "Encountered parse error, skipping aggregator[%s].", k);
parseExceptionMessages.add(e.getMessage());
}
final ColumnType type = aggFactory.getIntermediateType();
// One marker byte records nullness; value bytes follow only for non-null results.
if (agg.isNull()) {
out.writeByte(NullHandling.IS_NULL_BYTE);
} else {
out.writeByte(NullHandling.IS_NOT_NULL_BYTE);
if (type.is(ValueType.FLOAT)) {
out.writeFloat(agg.getFloat());
} else if (type.is(ValueType.LONG)) {
WritableUtils.writeVLong(out, agg.getLong());
} else if (type.is(ValueType.DOUBLE)) {
out.writeDouble(agg.getDouble());
} else if (type.is(ValueType.COMPLEX)) {
Object val = agg.get();
ComplexMetricSerde serde = getComplexMetricSerde(type.getComplexTypeName());
writeBytes(serde.toBytes(val), out);
} else {
throw new IAE("Unable to serialize type[%s]", type.asTypeString());
}
}
}
}
return new SerializeResult(out.toByteArray(), parseExceptionMessages);
} catch (IOException ex) {
// ByteArrayDataOutput writes to memory and should never actually throw; wrap if it does.
throw new RuntimeException(ex);
}
}
Example usage of org.apache.druid.java.util.common.IAE in the druid-io/druid project: class BaseVarianceSqlAggregator, method toDruidAggregation.
/**
 * Translates a SQL variance/stddev aggregate call into a Druid VarianceAggregatorFactory,
 * plus a StandardDeviationPostAggregator for the STDDEV variants.
 *
 * Returns null when the input expression cannot be expressed as a Druid numeric aggregator
 * input. Throws IAE when the resolved input type is missing or non-numeric.
 *
 * Fix: the original built the DimensionSpec (which consumes inputType) BEFORE validating
 * that inputType was non-null and numeric; validation now happens before first use so we
 * fail fast and never hand a null/invalid type to the dimension-spec machinery.
 */
@Nullable
@Override
public Aggregation toDruidAggregation(PlannerContext plannerContext, RowSignature rowSignature, VirtualColumnRegistry virtualColumnRegistry, RexBuilder rexBuilder, String name, AggregateCall aggregateCall, Project project, List<Aggregation> existingAggregations, boolean finalizeAggregations) {
  final RexNode inputOperand = Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(0));
  final DruidExpression input = Aggregations.toDruidExpressionForNumericAggregator(plannerContext, rowSignature, inputOperand);
  if (input == null) {
    // Expression is not usable as a numeric aggregator input; let the planner try elsewhere.
    return null;
  }
  final RelDataType dataType = inputOperand.getType();
  final ColumnType inputType = Calcites.getColumnTypeForRelDataType(dataType);
  final SqlAggFunction func = calciteFunction();
  // Validate the input type BEFORE it is used to build any dimension spec.
  if (inputType == null) {
    throw new IAE("VarianceSqlAggregator[%s] has invalid inputType", func);
  }
  if (!inputType.isNumeric()) {
    throw new IAE("VarianceSqlAggregator[%s] has invalid inputType[%s]", func, inputType.asTypeString());
  }
  final String inputTypeName = StringUtils.toLowerCase(inputType.getType().name());
  final DimensionSpec dimensionSpec;
  if (input.isSimpleExtraction()) {
    dimensionSpec = input.getSimpleExtraction().toDimensionSpec(null, inputType);
  } else {
    String virtualColumnName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(input, dataType);
    dimensionSpec = new DefaultDimensionSpec(virtualColumnName, null, inputType);
  }
  final String aggName = StringUtils.format("%s:agg", name);
  // POP variants use the population estimator; everything else defaults to sample.
  final String estimator = (func == SqlStdOperatorTable.VAR_POP || func == SqlStdOperatorTable.STDDEV_POP) ? "population" : "sample";
  final AggregatorFactory aggregatorFactory = new VarianceAggregatorFactory(aggName, dimensionSpec.getDimension(), estimator, inputTypeName);
  PostAggregator postAggregator = null;
  if (func == SqlStdOperatorTable.STDDEV_POP || func == SqlStdOperatorTable.STDDEV_SAMP || func == SqlStdOperatorTable.STDDEV) {
    // Stddev is computed as a post-aggregation (sqrt) over the variance aggregator.
    postAggregator = new StandardDeviationPostAggregator(name, aggregatorFactory.getName(), estimator);
  }
  return Aggregation.create(ImmutableList.of(aggregatorFactory), postAggregator);
}
Example usage of org.apache.druid.java.util.common.IAE in the druid-io/druid project: class CompactionTaskTest, method testCreateCompactionTaskWithConflictingGranularitySpecAndSegmentGranularityShouldThrowIAE.
@Test(expected = IAE.class)
public void testCreateCompactionTaskWithConflictingGranularitySpecAndSegmentGranularityShouldThrowIAE() {
  // Configure a compaction task whose builder-level segmentGranularity (HOUR) conflicts
  // with the segmentGranularity carried inside the granularity spec (MINUTE).
  final Builder taskBuilder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);
  taskBuilder.inputSpec(new CompactionIntervalSpec(COMPACTION_INTERVAL, SegmentUtils.hashIds(SEGMENTS)));
  taskBuilder.tuningConfig(createTuningConfig());
  taskBuilder.segmentGranularity(Granularities.HOUR);
  taskBuilder.granularitySpec(new ClientCompactionTaskGranularitySpec(Granularities.MINUTE, Granularities.DAY, null));
  try {
    taskBuilder.build();
  } catch (IAE e) {
    // Check the message here, then rethrow so @Test(expected = IAE.class) also passes.
    Assert.assertEquals(StringUtils.format(CONFLICTING_SEGMENT_GRANULARITY_FORMAT, Granularities.HOUR, Granularities.MINUTE), e.getMessage());
    throw e;
  }
  // Reached only if build() did not throw — that is a test failure.
  Assert.fail("Should not have reached here!");
}
Example usage of org.apache.druid.java.util.common.IAE in the druid-io/druid project: class GroupByQueryEngineV2, method process.
/**
 * Runs a groupBy v2 query against a single segment's StorageAdapter, dispatching to the
 * vectorized engine when the query and segment support it.
 *
 * Takes one intermediate-results buffer from the pool; the buffer holder is attached to the
 * returned sequence as baggage (released when the sequence is fully consumed) or closed
 * immediately if anything throws before the sequence is returned.
 */
public static Sequence<ResultRow> process(final GroupByQuery query, @Nullable final StorageAdapter storageAdapter, final NonBlockingPool<ByteBuffer> intermediateResultsBufferPool, final GroupByQueryConfig querySpecificConfig) {
  if (storageAdapter == null) {
    throw new ISE("Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped.");
  }
  final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals();
  if (intervals.size() != 1) {
    throw new IAE("Should only have one interval, got[%s]", intervals);
  }
  final ResourceHolder<ByteBuffer> bufferHolder = intermediateResultsBufferPool.take();
  try {
    // Optional fudge timestamp is passed through the query context as a string epoch-millis value.
    final String rawFudgeTimestamp = NullHandling.emptyToNullIfNeeded(query.getContextValue(GroupByStrategyV2.CTX_KEY_FUDGE_TIMESTAMP, null));
    final DateTime fudgeTimestamp = rawFudgeTimestamp == null ? null : DateTimes.utc(Long.parseLong(rawFudgeTimestamp));
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getFilter()));
    final Interval interval = Iterables.getOnlyElement(query.getIntervals());
    final boolean useVectorizedEngine = QueryContexts.getVectorize(query).shouldVectorize(VectorGroupByEngine.canVectorize(query, storageAdapter, filter));
    final Sequence<ResultRow> rows = useVectorizedEngine
        ? VectorGroupByEngine.process(query, storageAdapter, bufferHolder.get(), fudgeTimestamp, filter, interval, querySpecificConfig)
        : processNonVectorized(query, storageAdapter, bufferHolder.get(), fudgeTimestamp, querySpecificConfig, filter, interval);
    // The buffer must live as long as the result sequence does.
    return rows.withBaggage(bufferHolder);
  } catch (Throwable t) {
    // Nothing owns the buffer yet, so release it before propagating.
    bufferHolder.close();
    throw t;
  }
}
Example usage of org.apache.druid.java.util.common.IAE in the druid-io/druid project: class GroupByQueryEngine, method process.
/**
 * Runs a groupBy v1 query against a single segment's StorageAdapter.
 *
 * Validates the adapter, the multi-value-unnesting context flag, and the single-interval
 * requirement, then maps each cursor to a RowIterator-backed sequence. One intermediate
 * buffer is taken from the pool and attached as baggage to the concatenated result.
 */
public Sequence<Row> process(final GroupByQuery query, final StorageAdapter storageAdapter) {
  if (storageAdapter == null) {
    throw new ISE("Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped.");
  }
  // groupBy v1 always unnests multi-value dimensions; reject queries that disable it.
  if (!query.getContextValue(GroupByQueryConfig.CTX_KEY_ENABLE_MULTI_VALUE_UNNESTING, true)) {
    throw new UOE("GroupBy v1 does not support %s as false. Set %s to true or use groupBy v2", GroupByQueryConfig.CTX_KEY_ENABLE_MULTI_VALUE_UNNESTING, GroupByQueryConfig.CTX_KEY_ENABLE_MULTI_VALUE_UNNESTING);
  }
  final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals();
  if (intervals.size() != 1) {
    throw new IAE("Should only have one interval, got[%s]", intervals);
  }
  final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimFilter()));
  final Sequence<Cursor> cursors = storageAdapter.makeCursors(filter, intervals.get(0), query.getVirtualColumns(), query.getGranularity(), false, null);
  final ResourceHolder<ByteBuffer> bufferHolder = intermediateResultsBufferPool.take();
  // Each cursor yields one lazily-built sequence of rows; cleanup closes the iterator.
  final Sequence<Sequence<Row>> rowSequences = Sequences.map(
      cursors,
      cursor -> new BaseSequence<>(
          new BaseSequence.IteratorMaker<Row, RowIterator>() {
            @Override
            public RowIterator make() {
              return new RowIterator(query, cursor, bufferHolder.get(), config.get());
            }

            @Override
            public void cleanup(RowIterator iterFromMake) {
              CloseableUtils.closeAndWrapExceptions(iterFromMake);
            }
          }
      )
  );
  // The buffer is released only once the whole concatenated sequence has been consumed.
  return Sequences.concat(Sequences.withBaggage(rowSequences, bufferHolder));
}
Aggregations