Use of com.serotonin.m2m2.view.stats.SeriesValueTime in project ma-modules-public by infiniteautomation.
From class AggregateValueMapper, method apply:
@Override
public StreamingPointValueTimeModel apply(SeriesValueTime<? extends AggregateValue> value) {
    DataPointVO point = lookupPoint(value.getSeriesId());
    RollupEnum rollup = rollup(point);
    AggregateValue aggregate = value.getValue();
    StreamingPointValueTimeModel model = new StreamingPointValueTimeModel(point.getXid(), value.getTime());
    if (rollup == RollupEnum.ALL) {
        AllStatisticsModel allStatisticsModel = getAllRollup(point, aggregate);
        model.setAllStatistics(allStatisticsModel);
    } else {
        ValueTimeModel rollupValue = getRollupValue(point, aggregate, rollup);
        model.setValueModel(rollupValue);
    }
    if (fields.contains(PointValueField.CACHED)) {
        model.setCached(false);
    }
    if (fields.contains(PointValueField.BOOKEND)) {
        model.setBookend(false);
    }
    return copyPointPropertiesToModel(point, model);
}
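
Below is a brief usage sketch, not taken from the project source, showing how this mapper might be applied to a stream of aggregates. It assumes, based on the @Override annotation on apply, that AggregateValueMapper can be used as a java.util.function.Function; the helper method name toModels and both parameters are hypothetical.

// Sketch only: assumes AggregateValueMapper is usable as a
// Function<SeriesValueTime<? extends AggregateValue>, StreamingPointValueTimeModel>.
// The method name and parameters are illustrative, not taken from the project.
Stream<StreamingPointValueTimeModel> toModels(AggregateValueMapper mapper,
        Stream<SeriesValueTime<AggregateValue>> aggregates) {
    // Each aggregate value is converted to a streaming model keyed by the point's XID.
    return aggregates.map(mapper::apply);
}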
Use of com.serotonin.m2m2.view.stats.SeriesValueTime in project ma-core-public by infiniteautomation.
From class AggregateDao, method aggregate:
/**
 * Aggregate a stream of raw point values into aggregate statistics. Because Mango statistics rely on knowing the
 * value of the point immediately prior to the "from" time, you must include an initial start value in the stream
 * (if one exists). The timestamp of this start value should be less than the "from" time.
 *
 * @param point data point
 * @param from from time (inclusive)
 * @param to to time (exclusive)
 * @param pointValues stream of point values; must include a start value (at a time < from) for accurate statistics
 * @param aggregationPeriod aggregation period (bucket/window size)
 * @return stream of aggregates
 */
default Stream<SeriesValueTime<AggregateValue>> aggregate(DataPointVO point, ZonedDateTime from, ZonedDateTime to,
        Stream<? extends PointValueTime> pointValues, TemporalAmount aggregationPeriod) {
    BucketCalculator bucketCalc = new TemporalAmountBucketCalculator(from, to, aggregationPeriod);
    AbstractPointValueTimeQuantizer<?> quantizer;
    switch (point.getPointLocator().getDataType()) {
        case BINARY:
        case MULTISTATE:
            quantizer = new StartsAndRuntimeListQuantizer(bucketCalc);
            break;
        case NUMERIC:
            quantizer = new AnalogStatisticsQuantizer(bucketCalc);
            break;
        case ALPHANUMERIC:
            quantizer = new ValueChangeCounterQuantizer(bucketCalc);
            break;
        default:
            throw new IllegalStateException("Unknown data type: " + point.getPointLocator().getDataType());
    }
    Stream<AggregateValue> aggregateStream = StatisticsAggregator.aggregate(pointValues, quantizer)
            .filter(v -> v instanceof AggregateValue)
            .map(v -> (AggregateValue) v);
    return aggregateStream.map(v -> new DefaultSeriesValueTime<>(point.getSeriesId(), v.getPeriodStartTime(), v));
}
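
The following is a hedged sketch of a possible call site for this method, following the Javadoc's requirement that the raw stream begin with a start value timestamped before "from". The helper name hourlyAggregates, the one-day window, the hourly period, and the way rawValues is obtained are all illustrative assumptions, not part of the project source.

// Sketch only: a hypothetical caller of AggregateDao#aggregate.
// Requires java.time.Duration, java.time.ZonedDateTime, java.time.temporal.TemporalAmount, java.util.stream.Stream.
Stream<SeriesValueTime<AggregateValue>> hourlyAggregates(AggregateDao aggregateDao, DataPointVO point,
        Stream<? extends PointValueTime> rawValues) {
    // rawValues should begin with a start value whose timestamp is before "from" (if one exists).
    ZonedDateTime to = ZonedDateTime.now();
    ZonedDateTime from = to.minusDays(1);        // query the last 24 hours
    TemporalAmount period = Duration.ofHours(1); // one aggregate bucket per hour
    return aggregateDao.aggregate(point, from, to, rawValues, period);
}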