
Example 1 with TagVFilter

use of net.opentsdb.query.filter.TagVFilter in project opentsdb by OpenTSDB.

In the class TsdbQuery, the method findSpans:

/**
   * Finds all the {@link Span}s that match this query.
   * This is what actually scans the HBase table and loads the data into
   * {@link Span}s.
   * @return A map from HBase row key to the {@link Span} for that row key.
   * Since a {@link Span} actually contains multiple HBase rows, the row key
   * stored in the map has its timestamp zero'ed out.
   * @throws HBaseException if there was a problem communicating with HBase to
   * perform the search.
   * @throws IllegalArgumentException if bad data was retrieved from HBase.
   */
private Deferred<TreeMap<byte[], Span>> findSpans() throws HBaseException {
    final short metric_width = tsdb.metrics.width();
    // The key is a row key from HBase.
    final TreeMap<byte[], Span> spans = new TreeMap<byte[], Span>(new SpanCmp((short) (Const.SALT_WIDTH() + metric_width)));
    // Copy only the filters that should trigger a tag resolution. If this list
    // is empty due to literals or a wildcard star, then we'll save a TON of
    // UID lookups
    final List<TagVFilter> scanner_filters;
    if (filters != null) {
        scanner_filters = new ArrayList<TagVFilter>(filters.size());
        for (final TagVFilter filter : filters) {
            if (filter.postScan()) {
                scanner_filters.add(filter);
            }
        }
    } else {
        scanner_filters = null;
    }
    if (Const.SALT_WIDTH() > 0) {
        final List<Scanner> scanners = new ArrayList<Scanner>(Const.SALT_BUCKETS());
        for (int i = 0; i < Const.SALT_BUCKETS(); i++) {
            scanners.add(getScanner(i));
        }
        scan_start_time = DateTime.nanoTime();
        return new SaltScanner(tsdb, metric, scanners, spans, scanner_filters, delete, query_stats, query_index).scan();
    }
    scan_start_time = DateTime.nanoTime();
    final Scanner scanner = getScanner();
    if (query_stats != null) {
        query_stats.addScannerId(query_index, 0, scanner.toString());
    }
    final Deferred<TreeMap<byte[], Span>> results = new Deferred<TreeMap<byte[], Span>>();
    /**
    * Scanner callback executed recursively each time we get a set of data
    * from storage. This is responsible for determining what columns are
    * returned and issuing requests to load leaf objects.
    * When the scanner returns a null set of rows, the method initiates the
    * final callback.
    */
    final class ScannerCB implements Callback<Object, ArrayList<ArrayList<KeyValue>>> {

        int nrows = 0;

        boolean seenAnnotation = false;

        long scanner_start = DateTime.nanoTime();

        long timeout = tsdb.getConfig().getLong("tsd.query.timeout");

        private final Set<String> skips = new HashSet<String>();

        private final Set<String> keepers = new HashSet<String>();

        // only used for salted scanners
        private final int index = 0;

        /** nanosecond timestamps */
        // reset each time we send an RPC to HBase
        private long fetch_start = 0;

        // cumulation of time waiting on HBase
        private long fetch_time = 0;

        // cumulation of time resolving UIDs
        private long uid_resolve_time = 0;

        private long uids_resolved = 0;

        // cumulation of time compacting
        private long compaction_time = 0;

        private long dps_pre_filter = 0;

        private long rows_pre_filter = 0;

        private long dps_post_filter = 0;

        private long rows_post_filter = 0;

        /** Error callback that will capture an exception from AsyncHBase and store
       * it so we can bubble it up to the caller.
       */
        class ErrorCB implements Callback<Object, Exception> {

            @Override
            public Object call(final Exception e) throws Exception {
                LOG.error("Scanner " + scanner + " threw an exception", e);
                close(e);
                return null;
            }
        }

        /**
      * Starts the scanner and is called recursively to fetch the next set of
      * rows from the scanner.
      * @return The map of spans if loaded successfully, null if no data was
      * found
      */
        public Object scan() {
            fetch_start = DateTime.nanoTime();
            return scanner.nextRows().addCallback(this).addErrback(new ErrorCB());
        }

        /**
      * Loops through each row of the scanner results and parses out data
      * points and optional meta data
      * @return null if no rows were found, otherwise the TreeMap with spans
      */
        @Override
        public Object call(final ArrayList<ArrayList<KeyValue>> rows) throws Exception {
            fetch_time += DateTime.nanoTime() - fetch_start;
            try {
                if (rows == null) {
                    scanlatency.add((int) DateTime.msFromNano(fetch_time));
                    LOG.info(TsdbQuery.this + " matched " + nrows + " rows in " + spans.size() + " spans in " + DateTime.msFromNano(fetch_time) + "ms");
                    close(null);
                    return null;
                }
                if (timeout > 0 && DateTime.msFromNanoDiff(DateTime.nanoTime(), scanner_start) > timeout) {
                    throw new InterruptedException("Query timeout exceeded!");
                }
                rows_pre_filter += rows.size();
                // used for UID resolution if a filter is involved
                final List<Deferred<Object>> lookups = filters != null && !filters.isEmpty() ? new ArrayList<Deferred<Object>>(rows.size()) : null;
                for (final ArrayList<KeyValue> row : rows) {
                    final byte[] key = row.get(0).key();
                    if (Bytes.memcmp(metric, key, 0, metric_width) != 0) {
                        scanner.close();
                        throw new IllegalDataException("HBase returned a row that doesn't match" + " our scanner (" + scanner + ")! " + row + " does not start" + " with " + Arrays.toString(metric));
                    }
                    // Get a rough estimate of the data points in the compacted
                    // columns without deserializing the byte arrays.
                    for (final KeyValue kv : row) {
                        if (kv.qualifier().length % 2 == 0) {
                            if (kv.qualifier().length == 2 || kv.qualifier().length == 4) {
                                ++dps_pre_filter;
                            } else {
                                // Assume all points in the compacted column share the
                                // same precision. This is likely incorrect.
                                if (Internal.inMilliseconds(kv.qualifier())) {
                                    dps_pre_filter += (kv.qualifier().length / 4);
                                } else {
                                    dps_pre_filter += (kv.qualifier().length / 2);
                                }
                            }
                        } else if (kv.qualifier()[0] == AppendDataPoints.APPEND_COLUMN_PREFIX) {
                            // with appends we don't have a good rough estimate as the length
                            // can vary widely with the value length variability. Therefore we
                            // have to iterate.
                            int idx = 0;
                            int qlength = 0;
                            while (idx < kv.value().length) {
                                qlength = Internal.getQualifierLength(kv.value(), idx);
                                idx += qlength + Internal.getValueLengthFromQualifier(kv.value(), idx);
                                ++dps_pre_filter;
                            }
                        }
                    }
                    // TODO - byte set instead of a string for the uid may be faster
                    if (scanner_filters != null && !scanner_filters.isEmpty()) {
                        lookups.clear();
                        final String tsuid = UniqueId.uidToString(UniqueId.getTSUIDFromKey(key, TSDB.metrics_width(), Const.TIMESTAMP_BYTES));
                        if (skips.contains(tsuid)) {
                            continue;
                        }
                        if (!keepers.contains(tsuid)) {
                            final long uid_start = DateTime.nanoTime();
                            /** Callback invoked after all of the UIDs have been resolved. */
                            class MatchCB implements Callback<Object, ArrayList<Boolean>> {

                                @Override
                                public Object call(final ArrayList<Boolean> matches) throws Exception {
                                    for (final boolean matched : matches) {
                                        if (!matched) {
                                            skips.add(tsuid);
                                            return null;
                                        }
                                    }
                                    // matched all, good data
                                    keepers.add(tsuid);
                                    processRow(key, row);
                                    return null;
                                }
                            }
                            /** Resolves all of the row key UIDs to their strings for filtering */
                            class GetTagsCB implements Callback<Deferred<ArrayList<Boolean>>, Map<String, String>> {

                                @Override
                                public Deferred<ArrayList<Boolean>> call(final Map<String, String> tags) throws Exception {
                                    uid_resolve_time += (DateTime.nanoTime() - uid_start);
                                    uids_resolved += tags.size();
                                    final List<Deferred<Boolean>> matches = new ArrayList<Deferred<Boolean>>(scanner_filters.size());
                                    for (final TagVFilter filter : scanner_filters) {
                                        matches.add(filter.match(tags));
                                    }
                                    return Deferred.group(matches);
                                }
                            }
                            lookups.add(Tags.getTagsAsync(tsdb, key).addCallbackDeferring(new GetTagsCB()).addBoth(new MatchCB()));
                        } else {
                            processRow(key, row);
                        }
                    } else {
                        processRow(key, row);
                    }
                }
                // wait on any outstanding UID resolutions before fetching the next
                // set of rows; we can continue right away if we don't have filters.
                if (lookups != null && lookups.size() > 0) {
                    class GroupCB implements Callback<Object, ArrayList<Object>> {

                        @Override
                        public Object call(final ArrayList<Object> group) throws Exception {
                            return scan();
                        }
                    }
                    return Deferred.group(lookups).addCallback(new GroupCB());
                } else {
                    return scan();
                }
            } catch (Exception e) {
                close(e);
                return null;
            }
        }

        /**
        * Finds or creates the span for this row, compacts it and stores it.
        * @param key The row key to use for fetching the span
        * @param row The row to add
        */
        void processRow(final byte[] key, final ArrayList<KeyValue> row) {
            ++rows_post_filter;
            if (delete) {
                final DeleteRequest del = new DeleteRequest(tsdb.dataTable(), key);
                tsdb.getClient().delete(del);
            }
            // Get a rough estimate of the data points in the compacted
            // columns without deserializing the byte arrays.
            for (final KeyValue kv : row) {
                if (kv.qualifier().length % 2 == 0) {
                    if (kv.qualifier().length == 2 || kv.qualifier().length == 4) {
                        ++dps_post_filter;
                    } else {
                        // Assume all points in the compacted column share the
                        // same precision. This is likely incorrect.
                        if (Internal.inMilliseconds(kv.qualifier())) {
                            dps_post_filter += (kv.qualifier().length / 4);
                        } else {
                            dps_post_filter += (kv.qualifier().length / 2);
                        }
                    }
                } else if (kv.qualifier()[0] == AppendDataPoints.APPEND_COLUMN_PREFIX) {
                    // with appends we don't have a good rough estimate as the length
                    // can vary widely with the value length variability. Therefore we
                    // have to iterate.
                    int idx = 0;
                    int qlength = 0;
                    while (idx < kv.value().length) {
                        qlength = Internal.getQualifierLength(kv.value(), idx);
                        idx += qlength + Internal.getValueLengthFromQualifier(kv.value(), idx);
                        ++dps_post_filter;
                    }
                }
            }
            Span datapoints = spans.get(key);
            if (datapoints == null) {
                datapoints = new Span(tsdb);
                spans.put(key, datapoints);
            }
            final long compaction_start = DateTime.nanoTime();
            final KeyValue compacted = tsdb.compact(row, datapoints.getAnnotations());
            compaction_time += (DateTime.nanoTime() - compaction_start);
            seenAnnotation |= !datapoints.getAnnotations().isEmpty();
            if (compacted != null) {
                // Can be null if we ignored all KVs.
                datapoints.addRow(compacted);
                ++nrows;
            }
        }

        void close(final Exception e) {
            scanner.close();
            if (query_stats != null) {
                query_stats.addScannerStat(query_index, index, QueryStat.SCANNER_TIME, DateTime.nanoTime() - scan_start_time);
                // Scanner Stats
                /* Uncomment when AsyncHBase has this feature:
           query_stats.addScannerStat(query_index, index, 
               QueryStat.ROWS_FROM_STORAGE, scanner.getRowsFetched());
           query_stats.addScannerStat(query_index, index, 
               QueryStat.COLUMNS_FROM_STORAGE, scanner.getColumnsFetched());
           query_stats.addScannerStat(query_index, index, 
               QueryStat.BYTES_FROM_STORAGE, scanner.getBytesFetched()); */
                query_stats.addScannerStat(query_index, index, QueryStat.HBASE_TIME, fetch_time);
                query_stats.addScannerStat(query_index, index, QueryStat.SUCCESSFUL_SCAN, e == null ? 1 : 0);
                // Post Scan stats
                query_stats.addScannerStat(query_index, index, QueryStat.ROWS_PRE_FILTER, rows_pre_filter);
                query_stats.addScannerStat(query_index, index, QueryStat.DPS_PRE_FILTER, dps_pre_filter);
                query_stats.addScannerStat(query_index, index, QueryStat.ROWS_POST_FILTER, rows_post_filter);
                query_stats.addScannerStat(query_index, index, QueryStat.DPS_POST_FILTER, dps_post_filter);
                query_stats.addScannerStat(query_index, index, QueryStat.SCANNER_UID_TO_STRING_TIME, uid_resolve_time);
                query_stats.addScannerStat(query_index, index, QueryStat.UID_PAIRS_RESOLVED, uids_resolved);
                query_stats.addScannerStat(query_index, index, QueryStat.COMPACTION_TIME, compaction_time);
            }
            if (e != null) {
                results.callback(e);
            } else if (nrows < 1 && !seenAnnotation) {
                results.callback(null);
            } else {
                results.callback(spans);
            }
        }
    }
    new ScannerCB().scan();
    return results;
}
Also used : Scanner(org.hbase.async.Scanner) HashSet(java.util.HashSet) Set(java.util.Set) KeyValue(org.hbase.async.KeyValue) Deferred(com.stumbleupon.async.Deferred) ArrayList(java.util.ArrayList) TagVFilter(net.opentsdb.query.filter.TagVFilter) TreeMap(java.util.TreeMap) DeferredGroupException(com.stumbleupon.async.DeferredGroupException) HBaseException(org.hbase.async.HBaseException) Callback(com.stumbleupon.async.Callback) Map(java.util.Map) ByteMap(org.hbase.async.Bytes.ByteMap) TreeMap(java.util.TreeMap) DeleteRequest(org.hbase.async.DeleteRequest)
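
The post-scan filtering above boils down to: resolve the row's tags, run every scanner filter against them, and keep the row only if all filters match. Below is a minimal sketch of that check in isolation, extracted from the GetTagsCB/MatchCB logic; the class and method names are illustrative, not part of OpenTSDB.

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import net.opentsdb.query.filter.TagVFilter;

public class PostScanFilterSketch {

    /** Resolves to true only if every post-scan filter matches the row's resolved tags. */
    public static Deferred<Boolean> rowMatches(final List<TagVFilter> scanner_filters, final Map<String, String> tags) {
        final List<Deferred<Boolean>> matches = new ArrayList<Deferred<Boolean>>(scanner_filters.size());
        for (final TagVFilter filter : scanner_filters) {
            matches.add(filter.match(tags));
        }
        return Deferred.group(matches).addCallback(new Callback<Boolean, ArrayList<Boolean>>() {

            @Override
            public Boolean call(final ArrayList<Boolean> results) throws Exception {
                for (final boolean matched : results) {
                    if (!matched) {
                        // one miss disqualifies the whole row, mirroring MatchCB above
                        return false;
                    }
                }
                return true;
            }
        });
    }
}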

Example 2 with TagVFilter

use of net.opentsdb.query.filter.TagVFilter in project opentsdb by OpenTSDB.

In the class TsdbQuery, the method configureFromQuery:

@Override
public Deferred<Object> configureFromQuery(final TSQuery query, final int index) {
    if (query.getQueries() == null || query.getQueries().isEmpty()) {
        throw new IllegalArgumentException("Missing sub queries");
    }
    if (index < 0 || index > query.getQueries().size()) {
        throw new IllegalArgumentException("Query index was out of range");
    }
    final TSSubQuery sub_query = query.getQueries().get(index);
    setStartTime(query.startTime());
    setEndTime(query.endTime());
    setDelete(query.getDelete());
    query_index = index;
    query_stats = query.getQueryStats();
    // set common options
    aggregator = sub_query.aggregator();
    rate = sub_query.getRate();
    rate_options = sub_query.getRateOptions();
    if (rate_options == null) {
        rate_options = new RateOptions();
    }
    downsampler = sub_query.downsamplingSpecification();
    filters = sub_query.getFilters();
    explicit_tags = sub_query.getExplicitTags();
    // if we have tsuids set, that takes precedence
    if (sub_query.getTsuids() != null && !sub_query.getTsuids().isEmpty()) {
        tsuids = new ArrayList<String>(sub_query.getTsuids());
        String first_metric = "";
        for (final String tsuid : tsuids) {
            if (first_metric.isEmpty()) {
                first_metric = tsuid.substring(0, TSDB.metrics_width() * 2).toUpperCase();
                continue;
            }
            final String metric = tsuid.substring(0, TSDB.metrics_width() * 2).toUpperCase();
            if (!first_metric.equals(metric)) {
                throw new IllegalArgumentException("One or more TSUIDs did not share the same metric [" + first_metric + "] [" + metric + "]");
            }
        }
        return Deferred.fromResult(null);
    } else {
        /** Triggers the group by resolution if we had filters to resolve */
        class FilterCB implements Callback<Object, ArrayList<byte[]>> {

            @Override
            public Object call(final ArrayList<byte[]> results) throws Exception {
                findGroupBys();
                return null;
            }
        }
        /** Resolve and group by tags after resolving the metric */
        class MetricCB implements Callback<Deferred<Object>, byte[]> {

            @Override
            public Deferred<Object> call(final byte[] uid) throws Exception {
                metric = uid;
                if (filters != null) {
                    final List<Deferred<byte[]>> deferreds = new ArrayList<Deferred<byte[]>>(filters.size());
                    for (final TagVFilter filter : filters) {
                        deferreds.add(filter.resolveTagkName(tsdb));
                    }
                    return Deferred.group(deferreds).addCallback(new FilterCB());
                } else {
                    return Deferred.fromResult(null);
                }
            }
        }
        // fire off the callback chain by resolving the metric first
        return tsdb.metrics.getIdAsync(sub_query.getMetric()).addCallbackDeferring(new MetricCB());
    }
}
Also used : Deferred(com.stumbleupon.async.Deferred) ArrayList(java.util.ArrayList) TagVFilter(net.opentsdb.query.filter.TagVFilter) Callback(com.stumbleupon.async.Callback)
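
configureFromQuery chains two asynchronous stages: the metric UID lookup fires first, and its callback fans out the filter tag-key lookups as a group. The sketch below isolates that chaining pattern with the same stumbleupon async primitives; resolveMetric and resolveTagk are hypothetical stand-ins for tsdb.metrics.getIdAsync() and TagVFilter.resolveTagkName().

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import java.util.ArrayList;
import java.util.List;

public class ResolutionChainSketch {

    // Hypothetical resolvers standing in for the real TSDB UID lookups.
    static Deferred<byte[]> resolveMetric(final String name) {
        return Deferred.fromResult(new byte[] { 0, 0, 1 });
    }

    static Deferred<byte[]> resolveTagk(final String name) {
        return Deferred.fromResult(new byte[] { 0, 0, 2 });
    }

    public static Deferred<Object> configure(final String metric, final List<String> tag_keys) {
        /** Runs once every tag key UID has been resolved (group-by setup would go here). */
        class TagKeysCB implements Callback<Object, ArrayList<byte[]>> {

            @Override
            public Object call(final ArrayList<byte[]> uids) throws Exception {
                return null;
            }
        }
        /** Fans out the tag key lookups once the metric UID is known. */
        class MetricCB implements Callback<Deferred<Object>, byte[]> {

            @Override
            public Deferred<Object> call(final byte[] metric_uid) throws Exception {
                final List<Deferred<byte[]>> lookups = new ArrayList<Deferred<byte[]>>(tag_keys.size());
                for (final String tagk : tag_keys) {
                    lookups.add(resolveTagk(tagk));
                }
                return Deferred.group(lookups).addCallback(new TagKeysCB());
            }
        }
        // resolve the metric first, then the tag keys
        return resolveMetric(metric).addCallbackDeferring(new MetricCB());
    }
}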

Example 3 with TagVFilter

use of net.opentsdb.query.filter.TagVFilter in project opentsdb by OpenTSDB.

In the class QueryRpc, the method parseMTypeSubQuery:

/**
   * Parses a query string "m=..." type query and adds it to the TSQuery.
   * This will generate a TSSubQuery and add it to the TSQuery if successful
   * @param query_string The value of the m query string parameter, i.e. what
   * comes after the equals sign
   * @param data_query The query we're building
   * @throws BadRequestException if we are unable to parse the query or it is
   * missing components
   */
private static void parseMTypeSubQuery(final String query_string, TSQuery data_query) {
    if (query_string == null || query_string.isEmpty()) {
        throw new BadRequestException("The query string was empty");
    }
    // m is of the following forms:
    // agg:[interval-agg:][rate:]metric[{tag=value,...}]
    // where the parts in square brackets [ ... ] are optional.
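    // For example (illustrative): m=sum:10m-avg:rate:sys.cpu.user{host=web01}
    // splits into ["sum", "10m-avg", "rate", "sys.cpu.user{host=web01}"].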
    final String[] parts = Tags.splitString(query_string, ':');
    int i = parts.length;
    if (i < 2 || i > 5) {
        throw new BadRequestException("Invalid parameter m=" + query_string + " (" + (i < 2 ? "not enough" : "too many") + " :-separated parts)");
    }
    final TSSubQuery sub_query = new TSSubQuery();
    // the aggregator is first
    sub_query.setAggregator(parts[0]);
    // Move to the last part (the metric name).
    i--;
    List<TagVFilter> filters = new ArrayList<TagVFilter>();
    sub_query.setMetric(Tags.parseWithMetricAndFilters(parts[i], filters));
    sub_query.setFilters(filters);
    // parse out the rate, downsampler and explicit_tags options
    for (int x = 1; x < parts.length - 1; x++) {
        if (parts[x].toLowerCase().startsWith("rate")) {
            sub_query.setRate(true);
            if (parts[x].indexOf("{") >= 0) {
                sub_query.setRateOptions(QueryRpc.parseRateOptions(true, parts[x]));
            }
        } else if (Character.isDigit(parts[x].charAt(0))) {
            sub_query.setDownsample(parts[x]);
        } else if (parts[x].toLowerCase().startsWith("explicit_tags")) {
            sub_query.setExplicitTags(true);
        }
    }
    if (data_query.getQueries() == null) {
        final ArrayList<TSSubQuery> subs = new ArrayList<TSSubQuery>(1);
        data_query.setQueries(subs);
    }
    data_query.getQueries().add(sub_query);
}
Also used : TagVFilter(net.opentsdb.query.filter.TagVFilter) ArrayList(java.util.ArrayList) TSSubQuery(net.opentsdb.core.TSSubQuery) IncomingDataPoint(net.opentsdb.core.IncomingDataPoint)
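
For reference, the sub query the parser would build from m=sum:10m-avg:rate:sys.cpu.user{host=web01} can be assembled by hand with the same setters used above. A minimal sketch; the metric and tag values are illustrative.

import java.util.ArrayList;
import java.util.List;
import net.opentsdb.core.TSSubQuery;
import net.opentsdb.core.Tags;
import net.opentsdb.query.filter.TagVFilter;

public class MTypeSketch {

    public static TSSubQuery buildByHand() {
        final TSSubQuery sub_query = new TSSubQuery();
        sub_query.setAggregator("sum");
        sub_query.setDownsample("10m-avg");
        sub_query.setRate(true);
        final List<TagVFilter> filters = new ArrayList<TagVFilter>();
        // parseWithMetricAndFilters strips the braces into filters and returns the metric
        sub_query.setMetric(Tags.parseWithMetricAndFilters("sys.cpu.user{host=web01}", filters));
        sub_query.setFilters(filters);
        return sub_query;
    }
}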

Example 4 with TagVFilter

use of net.opentsdb.query.filter.TagVFilter in project opentsdb by OpenTSDB.

In the class TestTSSubQuery, the method validateWithFilterAndGroupByFilterSameTag:

@Test
public void validateWithFilterAndGroupByFilterSameTag() {
    TSSubQuery sub = getMetricForValidate();
    final List<TagVFilter> filters = new ArrayList<TagVFilter>(1);
    filters.add(new TagVWildcardFilter("host", "veti*"));
    sub.setFilters(filters);
    Map<String, String> tags = new HashMap<String, String>();
    tags.put("host", TagVWildcardFilter.FILTER_NAME + "(*nari)");
    sub.setTags(tags);
    sub.validateAndSetQuery();
    assertEquals("sys.cpu.0", sub.getMetric());
    assertEquals(TagVWildcardFilter.FILTER_NAME + "(*nari)", sub.getTags().get("host"));
    assertEquals(1, sub.getFilters().size());
    assertEquals(Aggregators.SUM, sub.aggregator());
    assertEquals(Aggregators.AVG, sub.downsampler());
    assertEquals(300000, sub.downsampleInterval());
}
Also used : TagVFilter(net.opentsdb.query.filter.TagVFilter) TagVWildcardFilter(net.opentsdb.query.filter.TagVWildcardFilter) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Test(org.junit.Test)
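
To see what the wildcard filter in this test actually matches, the sketch below builds the same filter and checks two tag maps directly. The wildcard match is computed synchronously, so joining the Deferred returns immediately; the host values are illustrative.

import java.util.HashMap;
import java.util.Map;
import net.opentsdb.query.filter.TagVFilter;
import net.opentsdb.query.filter.TagVWildcardFilter;

public class WildcardMatchSketch {

    public static void main(final String[] args) throws Exception {
        final TagVFilter filter = new TagVWildcardFilter("host", "veti*");
        final Map<String, String> tags = new HashMap<String, String>();
        tags.put("host", "vetinari");
        // expected: true, "vetinari" starts with "veti"
        System.out.println(filter.match(tags).join());
        tags.put("host", "web01");
        // expected: false, the wildcard does not match
        System.out.println(filter.match(tags).join());
    }
}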

Example 5 with TagVFilter

use of net.opentsdb.query.filter.TagVFilter in project opentsdb by OpenTSDB.

In the class TestTags, the method parseWithMetricAndFilters:

@Test
public void parseWithMetricAndFilters() {
    final List<TagVFilter> filters = new ArrayList<TagVFilter>();
    String metric = Tags.parseWithMetricAndFilters("sys.cpu.user", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(0, filters.size());
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{host=web01}", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(1, filters.size());
    assertEquals("host", filters.get(0).getTagk());
    assertTrue(filters.get(0).isGroupBy());
    assertTrue(filters.get(0) instanceof TagVLiteralOrFilter);
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{host=*}", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(1, filters.size());
    assertEquals("host", filters.get(0).getTagk());
    assertTrue(filters.get(0).isGroupBy());
    assertTrue(filters.get(0) instanceof TagVWildcardFilter);
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{host=web01}{}", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(1, filters.size());
    assertEquals("host", filters.get(0).getTagk());
    assertTrue(filters.get(0).isGroupBy());
    assertTrue(filters.get(0) instanceof TagVLiteralOrFilter);
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{host=*,owner=regexp(.*ob)}", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(2, filters.size());
    for (final TagVFilter filter : filters) {
        if (filter instanceof TagVWildcardFilter) {
            assertEquals("host", filter.getTagk());
        } else if (filter instanceof TagVRegexFilter) {
            assertEquals("owner", filter.getTagk());
        }
        assertTrue(filter.isGroupBy());
    }
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{}{host=web01}", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(1, filters.size());
    assertEquals("host", filters.get(0).getTagk());
    assertFalse(filters.get(0).isGroupBy());
    assertTrue(filters.get(0) instanceof TagVLiteralOrFilter);
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{}{host=iliteral_or(web01|Web02)}", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(1, filters.size());
    assertEquals("host", filters.get(0).getTagk());
    assertFalse(filters.get(0).isGroupBy());
    assertTrue(filters.get(0) instanceof TagVILiteralOrFilter);
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{}{host=iliteral_or(web01|Web02),owner=*}", filters);
    assertEquals("sys.cpu.user", metric);
    assertEquals(2, filters.size());
    for (final TagVFilter filter : filters) {
        if (filter instanceof TagVWildcardFilter) {
            assertEquals("owner", filter.getTagk());
        } else if (filter instanceof TagVILiteralOrFilter) {
            assertEquals("host", filter.getTagk());
        }
        assertFalse(filter.isGroupBy());
    }
    filters.clear();
    metric = Tags.parseWithMetricAndFilters("sys.cpu.user{host=iliteral_or(web01|Web02)}{owner=*}", filters);
    assertEquals("sys.cpu.user", metric);
    System.out.println(filters);
    assertEquals(2, filters.size());
    for (final TagVFilter filter : filters) {
        if (filter instanceof TagVWildcardFilter) {
            assertEquals("owner", filter.getTagk());
            assertFalse(filter.isGroupBy());
        } else if (filter instanceof TagVILiteralOrFilter) {
            assertEquals("host", filter.getTagk());
            assertTrue(filter.isGroupBy());
        }
    }
}
Also used : TagVFilter(net.opentsdb.query.filter.TagVFilter) TagVRegexFilter(net.opentsdb.query.filter.TagVRegexFilter) TagVILiteralOrFilter(net.opentsdb.query.filter.TagVLiteralOrFilter.TagVILiteralOrFilter) TagVWildcardFilter(net.opentsdb.query.filter.TagVWildcardFilter) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) TagVLiteralOrFilter(net.opentsdb.query.filter.TagVLiteralOrFilter) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
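
The two brace groups carry different semantics: filters in the first group are group-by filters, while filters in the second group only narrow the results. A minimal sketch that makes the distinction visible; the query string is illustrative and assumes the built-in filter types are available.

import java.util.ArrayList;
import java.util.List;
import net.opentsdb.core.Tags;
import net.opentsdb.query.filter.TagVFilter;

public class BraceSemanticsSketch {

    public static void main(final String[] args) {
        final List<TagVFilter> filters = new ArrayList<TagVFilter>();
        // first braces group by host, second braces only filter on owner
        final String metric = Tags.parseWithMetricAndFilters("sys.cpu.user{host=*}{owner=literal_or(bob)}", filters);
        System.out.println(metric);
        for (final TagVFilter filter : filters) {
            // expected: host groupBy=true, owner groupBy=false
            System.out.println(filter.getTagk() + " groupBy=" + filter.isGroupBy());
        }
    }
}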

Aggregations

TagVFilter (net.opentsdb.query.filter.TagVFilter) 15
ArrayList (java.util.ArrayList) 10
Test (org.junit.Test) 7
TagVWildcardFilter (net.opentsdb.query.filter.TagVWildcardFilter) 5
Callback (com.stumbleupon.async.Callback) 3
Deferred (com.stumbleupon.async.Deferred) 3
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest) 3
DeferredGroupException (com.stumbleupon.async.DeferredGroupException) 2
HashMap (java.util.HashMap) 2
Map (java.util.Map) 2
TSSubQuery (net.opentsdb.core.TSSubQuery) 2
ByteMap (org.hbase.async.Bytes.ByteMap) 2
HBaseException (org.hbase.async.HBaseException) 2
IOException (java.io.IOException) 1
HashSet (java.util.HashSet) 1
Set (java.util.Set) 1
TreeMap (java.util.TreeMap) 1
DataPoint (net.opentsdb.core.DataPoint) 1
DataPoints (net.opentsdb.core.DataPoints) 1
IncomingDataPoint (net.opentsdb.core.IncomingDataPoint) 1