Use of net.opentsdb.rollup.RollupInterval in project opentsdb by OpenTSDB.
From the class TestTsdbQueryRollup, method run10mMinLongSingleTS.
@Test
public void run10mMinLongSingleTS() throws Exception {
  final RollupInterval interval = rollup_config.getRollupInterval("10m");
  final Aggregator aggr = Aggregators.MIN;
  final long start_timestamp = 1356998400L;
  final long end_timestamp = 1357041600L;
  storeLongRollup(start_timestamp, end_timestamp, false, false, interval, aggr);
  final int time_interval = interval.getIntervalSeconds();
  setQuery(interval.getInterval(), aggr, tags, aggr);
  query.configureFromQuery(ts_query, 0);
  final DataPoints[] dps = query.run();
  assertEquals(1, dps.length);
  assertEquals(METRIC_STRING, dps[0].metricName());
  assertTrue(dps[0].getAggregatedTags().isEmpty());
  assertNull(dps[0].getAnnotations());
  assertEquals(TAGV_STRING, dps[0].getTags().get(TAGK_STRING));
  // Values start at 600 and grow by one interval (600 seconds) per point;
  // timestamps advance in 10 minute steps from the start of the query.
  int i = 600;
  long ts = start_timestamp * 1000;
  for (final DataPoint dp : dps[0]) {
    assertFalse(dp.isInteger());
    assertEquals(i, dp.doubleValue(), 0.0001);
    assertEquals(ts, dp.timestamp());
    ts += time_interval * 1000;
    i += time_interval;
  }
  // 12 hours of data at 10 minute resolution, inclusive of both endpoints.
  assertEquals(73, dps[0].size());
}
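The "10m" interval looked up above is supplied by the test's rollup_config fixture. As a minimal, hedged sketch, such an interval could be described with the same RollupInterval.builder() API used elsewhere in this listing; the table names and row span below are illustrative assumptions, not the fixture's real values.

final RollupInterval tenMinutes = RollupInterval.builder()
    .setTable("tsdb-rollup-10m")            // assumed rollup table name
    .setPreAggregationTable("tsdb-preagg")  // assumed pre-aggregate table name
    .setInterval("10m")                     // matches the lookup key used in the test
    .setRowSpan("1d")                       // assumed row span: one HBase row per day
    .build();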
Use of net.opentsdb.rollup.RollupInterval in project opentsdb by OpenTSDB.
From the class TestTsdbQueryRollup, method run10mSumFloatSingleTSRate.
@Test
public void run10mSumFloatSingleTSRate() throws Exception {
  final RollupInterval interval = rollup_config.getRollupInterval("10m");
  final Aggregator aggr = Aggregators.SUM;
  final long start_timestamp = 1356998400L;
  final long end_timestamp = 1357041600L;
  storeFloatRollup(start_timestamp, end_timestamp, false, false, interval, aggr);
  setQuery(interval.getInterval(), aggr, tags, aggr);
  // Enable rate conversion on the first sub query.
  ts_query.getQueries().get(0).setRate(true);
  query.configureFromQuery(ts_query, 0);
  final DataPoints[] dps = query.run();
  assertEquals(1, dps.length);
  assertEquals(METRIC_STRING, dps[0].metricName());
  assertTrue(dps[0].getAggregatedTags().isEmpty());
  assertNull(dps[0].getAnnotations());
  assertEquals(TAGV_STRING, dps[0].getTags().get(TAGK_STRING));
  // The first rate value lands one interval after the start, since a rate
  // requires two samples.
  long expected_timestamp = (start_timestamp + interval.getIntervalSeconds()) * 1000;
  for (final DataPoint dp : dps[0]) {
    assertEquals(1.0F, dp.doubleValue(), 0.00001);
    assertEquals(expected_timestamp, dp.timestamp());
    expected_timestamp += interval.getIntervalSeconds() * 1000;
  }
  // One fewer point than the non-rate test above: 72 instead of 73.
  assertEquals(72, dps[0].size());
}
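The rate expectations follow from the stored pattern: assuming storeFloatRollup writes values that increase by one interval's worth per 10 minute bucket (the same pattern the MIN test above asserts for storeLongRollup), every rate sample is a delta of 600 over 600 seconds, and the first bucket has no predecessor to diff against, which is why 73 stored buckets produce 72 rate points. A minimal sketch of that arithmetic, under those assumptions:

final int interval_seconds = 600;              // 10 minute buckets
final double delta_value = interval_seconds;   // assumed change between consecutive buckets
final double delta_time = interval_seconds;    // seconds between consecutive buckets
final double rate = delta_value / delta_time;  // == 1.0, matching the assertion above
final int rate_points = 73 - 1;                // the first bucket has nothing to diff against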
Use of net.opentsdb.rollup.RollupInterval in project opentsdb by OpenTSDB.
From the class TestTsdbQuery, method needsSplittingReturnsFalseIfNoSLAConfigured.
@Test
public void needsSplittingReturnsFalseIfNoSLAConfigured() {
  Whitebox.setInternalState(tsdb, "rollups_split_queries", true);
  // No delay SLA is configured on this interval, so the rollup query should
  // never need to be split against raw data.
  RollupInterval oneHourWithDelay = RollupInterval.builder()
      .setTable("fake-rollup-table").setPreAggregationTable("fake-preagg-table")
      .setInterval("1h").setRowSpan("1d").setDelaySla(null).build();
  RollupQuery rollup_query = new RollupQuery(oneHourWithDelay, Aggregators.SUM, 3600000, Aggregators.SUM);
  Whitebox.setInternalState(query, "rollup_query", rollup_query);
  assertTrue(query.isRollupQuery());
  assertFalse(query.needsSplitting());
}
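For contrast, a hedged sketch of the complementary setup with a delay SLA configured (the builder call mirrors makeRollupQuery later in this listing). Whether needsSplitting() then returns true is expected to depend on the query's time range relative to the SLA window, so no assertion is made here:

final RollupInterval oneHourWithSla = RollupInterval.builder()
    .setTable("fake-rollup-table").setPreAggregationTable("fake-preagg-table")
    .setInterval("1h").setRowSpan("1d").setDelaySla("2d").build();
final RollupQuery sla_query = new RollupQuery(oneHourWithSla, Aggregators.SUM, 3600000, Aggregators.SUM);
Whitebox.setInternalState(query, "rollup_query", sla_query);
// With an SLA present, needsSplitting() may return true depending on the queried time range.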
Use of net.opentsdb.rollup.RollupInterval in project opentsdb by OpenTSDB.
From the class TSDB, method addAggregatePointInternal.
Deferred<Object> addAggregatePointInternal(final String metric, final long timestamp,
    final byte[] value, final Map<String, String> tags, final short flags,
    final boolean is_groupby, final String interval, final String rollup_aggregator,
    final String groupby_aggregator) {
  if (interval != null && !interval.isEmpty() && rollup_config == null) {
    throw new IllegalArgumentException("No rollup or aggregations were configured");
  }
  if (is_groupby && (groupby_aggregator == null || groupby_aggregator.isEmpty())) {
    throw new IllegalArgumentException("Cannot write a group by data point without specifying the aggregation function. Metric=" + metric + " tags=" + tags);
  }
  // Rollup and pre-aggregate points must carry a positive Unix epoch timestamp
  // in seconds; millisecond timestamps are rejected here.
  if (timestamp < 0 || ((timestamp & Const.SECOND_MASK) != 0)) {
    throw new IllegalArgumentException((timestamp < 0 ? "negative " : "bad") + " timestamp=" + timestamp + " when trying to add value=" + Arrays.toString(value) + '/' + flags + " to metric=" + metric + ", tags=" + tags);
  }
  String agg_tag_value = tags.get(agg_tag_key);
  if (agg_tag_value == null) {
    if (!is_groupby) {
      // it's a rollup on "raw" data.
      if (tag_raw_data) {
        tags.put(agg_tag_key, raw_agg_tag_value);
      }
      agg_tag_value = raw_agg_tag_value;
    } else {
      // pre-agged so use the aggregator as the tag
      agg_tag_value = groupby_aggregator.toUpperCase();
      tags.put(agg_tag_key, agg_tag_value);
    }
  } else {
    // sanity check
    if (!agg_tag_value.equalsIgnoreCase(groupby_aggregator)) {
      throw new IllegalArgumentException("Given tag value for " + agg_tag_key + " of " + agg_tag_value + " did not match the group by aggregator of " + groupby_aggregator + " for " + metric + " " + tags);
    }
    // force upper case
    agg_tag_value = groupby_aggregator.toUpperCase();
    tags.put(agg_tag_key, agg_tag_value);
  }
  if (is_groupby) {
    try {
      Aggregators.get(groupby_aggregator.toLowerCase());
    } catch (NoSuchElementException e) {
      throw new IllegalArgumentException("Invalid group by aggregator " + groupby_aggregator + " with metric " + metric + " " + tags);
    }
    if (rollups_block_derived && // TODO - create a better list of aggs to block
        (agg_tag_value.equals("AVG") || agg_tag_value.equals("DEV"))) {
      throw new IllegalArgumentException("Derived group by aggregations are not allowed " + groupby_aggregator + " with metric " + metric + " " + tags);
    }
  }
  IncomingDataPoints.checkMetricAndTags(metric, tags);
  final RollupInterval rollup_interval = (interval == null || interval.isEmpty() ? null : rollup_config.getRollupInterval(interval));
  final int aggregator_id = rollup_interval == null ? -1 : rollup_config.getIdForAggregator(rollup_aggregator);
  final byte[] row = IncomingDataPoints.rowKeyTemplate(this, metric, tags);
  final String rollup_agg = rollup_aggregator != null ? rollup_aggregator.toUpperCase() : null;
  if (rollup_agg != null && rollups_block_derived && // TODO - create a better list of aggs to block
      (rollup_agg.equals("AVG") || rollup_agg.equals("DEV"))) {
    throw new IllegalArgumentException("Derived rollup aggregations are not allowed " + rollup_agg + " with metric " + metric + " " + tags);
  }
  // Raw points snap to the standard hourly row; rollups use the interval's row span.
  final int base_time = interval == null || interval.isEmpty() ? (int) (timestamp - (timestamp % Const.MAX_TIMESPAN)) : RollupUtils.getRollupBasetime(timestamp, rollup_interval);
  final byte[] qualifier = interval == null || interval.isEmpty() ? Internal.buildQualifier(timestamp, flags) : RollupUtils.buildRollupQualifier(timestamp, base_time, flags, aggregator_id, rollup_interval);
  /**
   * Callback executed for chaining filter calls to see if the value
   * should be written or not.
   */
  final class WriteCB implements Callback<Deferred<Object>, Boolean> {
    @Override
    public Deferred<Object> call(final Boolean allowed) throws Exception {
      if (!allowed) {
        rejected_aggregate_dps.incrementAndGet();
        return Deferred.fromResult(null);
      }
      Internal.setBaseTime(row, base_time);
      // NOTE: Do not modify the row key after calculating and applying the salt
      RowKey.prefixKeyWithSalt(row);
      Deferred<Object> result;
      final PutRequest point;
      if (interval == null || interval.isEmpty()) {
        if (!is_groupby) {
          throw new IllegalArgumentException("Interval cannot be null for a non-group by point");
        }
        point = new PutRequest(default_interval.getGroupbyTable(), row, FAMILY, qualifier, value);
      } else {
        point = new PutRequest(is_groupby ? rollup_interval.getGroupbyTable() : rollup_interval.getTemporalTable(), row, FAMILY, qualifier, value);
      }
      // TODO: Add a callback to time the latency of HBase and store the
      // timing in a moving Histogram (once we have a class for this).
      result = client.put(point);
      // Aggregate points are written directly and do not update the meta tracking.
      return result;
    }
  }
  if (ts_filter != null) {
    return ts_filter.allowDataPoint(metric, timestamp, value, tags, flags).addCallbackDeferring(new WriteCB());
  }
  try {
    return new WriteCB().call(true);
  } catch (Exception e) {
    return Deferred.fromError(e);
  }
}
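To make the row and qualifier layout above concrete, here is a minimal, hedged sketch of the two addressing paths for a single value, reusing only the helpers already called in the method. The timestamp, flags value, interval name, and aggregator name are illustrative assumptions.

// Hedged sketch: raw vs. rollup addressing for one data point.
final long ts = 1356998460L;  // illustrative second-precision timestamp
final short flags = 0x7;      // assumed encoding for an 8 byte integer value

// Raw path: base time snaps to the standard hourly row (Const.MAX_TIMESPAN).
final int raw_base = (int) (ts - (ts % Const.MAX_TIMESPAN));
final byte[] raw_qual = Internal.buildQualifier(ts, flags);

// Rollup path: base time and qualifier come from the interval's row span,
// and the qualifier also encodes the rollup aggregator id.
final RollupInterval ri = rollup_config.getRollupInterval("10m");
final int agg_id = rollup_config.getIdForAggregator("sum");
final int rollup_base = RollupUtils.getRollupBasetime(ts, ri);
final byte[] rollup_qual = RollupUtils.buildRollupQualifier(ts, rollup_base, flags, agg_id, ri);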
Use of net.opentsdb.rollup.RollupInterval in project opentsdb by OpenTSDB.
From the class BaseTsdbTest, method makeRollupQuery.
RollupQuery makeRollupQuery() {
  Whitebox.setInternalState(tsdb, "rollups_split_queries", true);
  // A 1 hour rollup interval with a 2 day delay SLA, backed by fake tables.
  final RollupInterval oneHourWithDelay = RollupInterval.builder()
      .setTable("fake-rollup-table").setPreAggregationTable("fake-preagg-table")
      .setInterval("1h").setRowSpan("1d").setDelaySla("2d").build();
  return new RollupQuery(oneHourWithDelay, Aggregators.SUM, 3600000, Aggregators.SUM);
}
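A typical use of this helper in a test, mirroring the needsSplitting test earlier in this listing (a sketch; the "rollup_query" field name comes from that test):

// Install the helper's rollup query on the query under test via reflection.
Whitebox.setInternalState(query, "rollup_query", makeRollupQuery());
assertTrue(query.isRollupQuery());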