
Example 1 with AggLevel

Use of org.apache.storm.metricstore.AggLevel in project storm by apache.

From class RocksDbKeyTest, method testMetricKey.

@Test
public void testMetricKey() {
    AggLevel aggLevel = AggLevel.AGG_LEVEL_10_MIN;
    int topologyId = 0x45665;
    long timestamp = System.currentTimeMillis();
    int metricId = 0xF3916034;
    int componentId = 0x82915031;
    int executorId = 0x434738;
    int hostId = 0x4348394;
    int port = 3456;
    int streamId = 0x84221956;
    // pack every field into a key, then verify each one round-trips intact
    RocksDbKey key = RocksDbKey.createMetricKey(aggLevel, topologyId, timestamp, metricId, componentId, executorId, hostId, port, streamId);
    Assert.assertEquals(topologyId, key.getTopologyId());
    Assert.assertEquals(timestamp, key.getTimestamp());
    Assert.assertEquals(metricId, key.getMetricId());
    Assert.assertEquals(componentId, key.getComponentId());
    Assert.assertEquals(executorId, key.getExecutorId());
    Assert.assertEquals(hostId, key.getHostnameId());
    Assert.assertEquals(port, key.getPort());
    Assert.assertEquals(streamId, key.getStreamId());
}
Also used: AggLevel (org.apache.storm.metricstore.AggLevel), Test (org.junit.Test)
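
The test above only confirms that each field survives the pack/unpack round trip. The scan in Example 2 also relies on key ordering: compareTo decides when a range scan has passed its end key. As a sketch, one might add an assertion like the following, under the assumption (implied by scanInternal's start/end keys) that keys with the same aggregation level and topology sort by timestamp; this test is hypothetical and not part of the Storm test suite.

@Test
public void testMetricKeyOrdering() {
    // Hypothetical test: assumes keys for the same aggregation level and
    // topology compare in timestamp order, which is what scanInternal's
    // start/end key range depends on.
    RocksDbKey earlier = RocksDbKey.createMetricKey(AggLevel.AGG_LEVEL_10_MIN, 1, 1000L, 2, 3, 4, 5, 6700, 8);
    RocksDbKey later = RocksDbKey.createMetricKey(AggLevel.AGG_LEVEL_10_MIN, 1, 2000L, 2, 3, 4, 5, 6700, 8);
    Assert.assertTrue(earlier.compareTo(later) < 0);
}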

Example 2 with AggLevel

Use of org.apache.storm.metricstore.AggLevel in project storm by apache.

From class RocksDbStore, method scanInternal.

// Performs a scan over the given filter options, returning results either as Metric objects (via scanCallback) or as raw key/value pairs (via rawCallback).
private void scanInternal(FilterOptions filter, ScanCallback scanCallback, RocksDbScanCallback rawCallback) throws MetricException {
    Map<String, Integer> stringToIdCache = new HashMap<>();
    Map<Integer, String> idToStringCache = new HashMap<>();
    int startTopologyId = 0;
    int endTopologyId = 0xFFFFFFFF;
    String filterTopologyId = filter.getTopologyId();
    if (filterTopologyId != null) {
        int topologyId = lookupMetadataString(KeyType.TOPOLOGY_STRING, filterTopologyId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == topologyId) {
            // string does not exist in database
            return;
        }
        startTopologyId = topologyId;
        endTopologyId = topologyId;
    }
    long startTime = filter.getStartTime();
    long endTime = filter.getEndTime();
    int startMetricId = 0;
    int endMetricId = 0xFFFFFFFF;
    String filterMetricName = filter.getMetricName();
    if (filterMetricName != null) {
        int metricId = lookupMetadataString(KeyType.METRIC_STRING, filterMetricName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == metricId) {
            // string does not exist in database
            return;
        }
        startMetricId = metricId;
        endMetricId = metricId;
    }
    int startComponentId = 0;
    int endComponentId = 0xFFFFFFFF;
    String filterComponentId = filter.getComponentId();
    if (filterComponentId != null) {
        int componentId = lookupMetadataString(KeyType.COMPONENT_STRING, filterComponentId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == componentId) {
            // string does not exist in database
            return;
        }
        startComponentId = componentId;
        endComponentId = componentId;
    }
    int startExecutorId = 0;
    int endExecutorId = 0xFFFFFFFF;
    String filterExecutorName = filter.getExecutorId();
    if (filterExecutorName != null) {
        int executorId = lookupMetadataString(KeyType.EXEC_ID_STRING, filterExecutorName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == executorId) {
            // string does not exist in database
            return;
        }
        startExecutorId = executorId;
        endExecutorId = executorId;
    }
    int startHostId = 0;
    int endHostId = 0xFFFFFFFF;
    String filterHostId = filter.getHostId();
    if (filterHostId != null) {
        int hostId = lookupMetadataString(KeyType.HOST_STRING, filterHostId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == hostId) {
            // string does not exist in database
            return;
        }
        startHostId = hostId;
        endHostId = hostId;
    }
    int startPort = 0;
    int endPort = 0xFFFFFFFF;
    Integer filterPort = filter.getPort();
    if (filterPort != null) {
        startPort = filterPort;
        endPort = filterPort;
    }
    int startStreamId = 0;
    int endStreamId = 0xFFFFFFFF;
    String filterStreamId = filter.getStreamId();
    if (filterStreamId != null) {
        int streamId = lookupMetadataString(KeyType.STREAM_ID_STRING, filterStreamId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == streamId) {
            // string does not exist in database
            return;
        }
        startStreamId = streamId;
        endStreamId = streamId;
    }
    try (ReadOptions ro = new ReadOptions()) {
        ro.setTotalOrderSeek(true);
        for (AggLevel aggLevel : filter.getAggLevels()) {
            RocksDbKey startKey = RocksDbKey.createMetricKey(aggLevel, startTopologyId, startTime, startMetricId, startComponentId, startExecutorId, startHostId, startPort, startStreamId);
            RocksDbKey endKey = RocksDbKey.createMetricKey(aggLevel, endTopologyId, endTime, endMetricId, endComponentId, endExecutorId, endHostId, endPort, endStreamId);
            try (RocksIterator iterator = db.newIterator(ro)) {
                for (iterator.seek(startKey.getRaw()); iterator.isValid(); iterator.next()) {
                    RocksDbKey key = new RocksDbKey(iterator.key());
                    if (key.compareTo(endKey) > 0) {
                        // past limit, quit
                        break;
                    }
                    if (startTopologyId != 0 && key.getTopologyId() != startTopologyId) {
                        continue;
                    }
                    long timestamp = key.getTimestamp();
                    if (timestamp < startTime || timestamp > endTime) {
                        continue;
                    }
                    if (startMetricId != 0 && key.getMetricId() != startMetricId) {
                        continue;
                    }
                    if (startComponentId != 0 && key.getComponentId() != startComponentId) {
                        continue;
                    }
                    if (startExecutorId != 0 && key.getExecutorId() != startExecutorId) {
                        continue;
                    }
                    if (startHostId != 0 && key.getHostnameId() != startHostId) {
                        continue;
                    }
                    if (startPort != 0 && key.getPort() != startPort) {
                        continue;
                    }
                    if (startStreamId != 0 && key.getStreamId() != startStreamId) {
                        continue;
                    }
                    RocksDbValue val = new RocksDbValue(iterator.value());
                    if (scanCallback != null) {
                        try {
                            // populate a metric
                            String metricName = metadataIdToString(KeyType.METRIC_STRING, key.getMetricId(), idToStringCache);
                            String topologyId = metadataIdToString(KeyType.TOPOLOGY_STRING, key.getTopologyId(), idToStringCache);
                            String componentId = metadataIdToString(KeyType.COMPONENT_STRING, key.getComponentId(), idToStringCache);
                            String executorId = metadataIdToString(KeyType.EXEC_ID_STRING, key.getExecutorId(), idToStringCache);
                            String hostname = metadataIdToString(KeyType.HOST_STRING, key.getHostnameId(), idToStringCache);
                            String streamId = metadataIdToString(KeyType.STREAM_ID_STRING, key.getStreamId(), idToStringCache);
                            Metric metric = new Metric(metricName, timestamp, topologyId, 0.0, componentId, executorId, hostname, streamId, key.getPort(), aggLevel);
                            val.populateMetric(metric);
                            // callback to caller
                            scanCallback.cb(metric);
                        } catch (MetricException e) {
                            LOG.warn("Failed to report found metric: {}", e.getMessage());
                        }
                    } else {
                        try {
                            if (!rawCallback.cb(key, val)) {
                                return;
                            }
                        } catch (RocksDBException e) {
                            throw new MetricException("Error reading metrics data", e);
                        }
                    }
                }
            }
        }
    }
}
Also used: RocksDBException (org.rocksdb.RocksDBException), HashMap (java.util.HashMap), RocksIterator (org.rocksdb.RocksIterator), MetricException (org.apache.storm.metricstore.MetricException), AggLevel (org.apache.storm.metricstore.AggLevel), ReadOptions (org.rocksdb.ReadOptions), Metric (org.apache.storm.metricstore.Metric)
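
scanInternal is private, so callers reach it through the store's public scan entry point with a FilterOptions describing the range. The sketch below is hypothetical: the setter names (setTopologyId, setMetricName, setStartTime, setEndTime, addAggLevel) mirror the getters consumed above but are assumptions, as is the scan(FilterOptions, ScanCallback) signature; store is assumed to be a RocksDbStore instance, and ScanCallback is written as a lambda on the assumption that cb(Metric) is its only method.

// Hypothetical usage sketch; setter names and the scan(...) signature are
// assumed to mirror the getters consumed by scanInternal above.
FilterOptions filter = new FilterOptions();
filter.setTopologyId("my-topology");                                // assumed setter
filter.setMetricName("__emit-count");                               // assumed setter
filter.setStartTime(System.currentTimeMillis() - 60 * 60 * 1000L);  // last hour
filter.setEndTime(System.currentTimeMillis());
filter.addAggLevel(AggLevel.AGG_LEVEL_10_MIN);                      // assumed; feeds getAggLevels()
store.scan(filter, metric -> LOG.info("found {}", metric));         // ScanCallback.cb(Metric)

Any filter field left unset stays null, which scanInternal treats as a full-range wildcard (0 through 0xFFFFFFFF) for that field.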

Example 3 with AggLevel

Use of org.apache.storm.metricstore.AggLevel in project storm by apache.

From class RocksDbMetricsWriter, method processInsert.

/**
 * Performs the actual metric insert, and aggregates over all bucket times.
 *
 * @param metric  Metric to store
 * @throws MetricException  if the database write fails
 */
private void processInsert(Metric metric) throws MetricException {
    // convert all strings to numeric Ids for the metric key and add to the metadata cache
    long metricTimestamp = metric.getTimestamp();
    Integer topologyId = storeMetadataString(KeyType.TOPOLOGY_STRING, metric.getTopologyId(), metricTimestamp);
    Integer metricId = storeMetadataString(KeyType.METRIC_STRING, metric.getMetricName(), metricTimestamp);
    Integer componentId = storeMetadataString(KeyType.COMPONENT_STRING, metric.getComponentId(), metricTimestamp);
    Integer executorId = storeMetadataString(KeyType.EXEC_ID_STRING, metric.getExecutorId(), metricTimestamp);
    Integer hostId = storeMetadataString(KeyType.HOST_STRING, metric.getHostname(), metricTimestamp);
    Integer streamId = storeMetadataString(KeyType.STREAM_ID_STRING, metric.getStreamId(), metricTimestamp);
    RocksDbKey key = RocksDbKey.createMetricKey(AggLevel.AGG_LEVEL_NONE, topologyId, metric.getTimestamp(), metricId, componentId, executorId, hostId, metric.getPort(), streamId);
    // save metric key/value to be batched
    RocksDbValue value = new RocksDbValue(metric);
    insertBatch.put(key, value);
    // Aggregate matching metrics over bucket timeframes.
    // Process buckets starting with the longest. Every insert writes all buckets
    // together, so if no aggregation exists at the longest bucket, none can exist
    // at the shorter ones and those lookups can be skipped.
    ListIterator<AggLevel> li = aggBuckets.listIterator(aggBuckets.size());
    boolean populate = true;
    while (li.hasPrevious()) {
        AggLevel bucket = li.previous();
        Metric aggMetric = new Metric(metric);
        aggMetric.setAggLevel(bucket);
        long msToBucket = 1000L * 60L * bucket.getValue();
        long roundedToBucket = msToBucket * (metric.getTimestamp() / msToBucket);
        aggMetric.setTimestamp(roundedToBucket);
        RocksDbKey aggKey = RocksDbKey.createMetricKey(bucket, topologyId, aggMetric.getTimestamp(), metricId, componentId, executorId, hostId, aggMetric.getPort(), streamId);
        if (populate) {
            // retrieve any existing aggregation matching this one and update the values
            if (store.populateFromKey(aggKey, aggMetric)) {
                aggMetric.addValue(metric.getValue());
            } else {
                // aggregating metric did not exist, don't look for further ones with smaller timestamps
                populate = false;
            }
        }
        // save metric key/value to be batched
        RocksDbValue aggVal = new RocksDbValue(aggMetric);
        insertBatch.put(aggKey, aggVal);
    }
    processBatchInsert(insertBatch);
    insertBatch.clear();
}
Also used: AggLevel (org.apache.storm.metricstore.AggLevel), Metric (org.apache.storm.metricstore.Metric), ListIterator (java.util.ListIterator)
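
The bucket rounding inside processInsert is plain integer arithmetic: truncating division maps a raw timestamp to the start of its bucket, so every metric in the same window lands on the same aggregation key. A standalone sketch of that step, assuming AGG_LEVEL_10_MIN.getValue() is 10 as the 1000 * 60 multiplier above implies:

// Round a raw timestamp down to its 10-minute bucket, exactly as processInsert does.
long timestamp = 1_700_000_123_456L;              // arbitrary example value
long msToBucket = 1000L * 60L * 10L;              // 600,000 ms per 10-minute bucket
long roundedToBucket = msToBucket * (timestamp / msToBucket);
// roundedToBucket == 1_699_999_800_000L: integer division truncates, so all
// timestamps within the same 10-minute window share one aggregation key,
// and populateFromKey can find and update the running aggregate.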

Aggregations

AggLevel (org.apache.storm.metricstore.AggLevel): 3 usages
Metric (org.apache.storm.metricstore.Metric): 2 usages
HashMap (java.util.HashMap): 1 usage
ListIterator (java.util.ListIterator): 1 usage
MetricException (org.apache.storm.metricstore.MetricException): 1 usage
Test (org.junit.Test): 1 usage
ReadOptions (org.rocksdb.ReadOptions): 1 usage
RocksDBException (org.rocksdb.RocksDBException): 1 usage
RocksIterator (org.rocksdb.RocksIterator): 1 usage