Use of org.apache.storm.metricstore.Metric in the Apache Storm project.
From class RocksDbStoreTest, method testMetricCleanup:
@Test
public void testMetricCleanup() throws Exception {
    // Share metadata strings between the two metrics so we can confirm the
    // cleaner does not delete metadata that a surviving metric still references.
    final String sharedTopologyId = "topology-cleanup-2";
    final String sharedStreamId = "stream-cleanup-5";
    final String dflt = "default";

    // staleMetric carries an ancient timestamp; freshMetric uses the current time.
    Metric staleMetric = new Metric(dflt, 40000000L, sharedTopologyId, 1.0, "component-1", dflt, "hostname-1", sharedStreamId, 1, AggLevel.AGG_LEVEL_NONE);
    Metric freshMetric = new Metric(dflt, System.currentTimeMillis(), sharedTopologyId, 1.0, "component-1", "executor-1", dflt, sharedStreamId, 1, AggLevel.AGG_LEVEL_NONE);
    store.insert(staleMetric);
    store.insert(freshMetric);
    waitForInsertFinish(freshMetric);

    // Before cleanup, both unaggregated metrics should be retrievable.
    FilterOptions filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    List<Metric> found = getMetricsFromScan(filter);
    Assert.assertTrue(found.size() >= 2);

    // Purge anything older than an hour; only the fresh metric should remain.
    MetricsCleaner cleaner = new MetricsCleaner((RocksDbStore) store, 1, 1, null, new StormMetricsRegistry());
    cleaner.purgeMetrics();
    found = getMetricsFromScan(filter);
    Assert.assertEquals(1, found.size());
    Assert.assertTrue(found.contains(freshMetric));
}
Use of org.apache.storm.metricstore.Metric in the Apache Storm project.
From class RocksDbStoreTest, method testScan:
@Test
public void testScan() throws Exception {
    FilterOptions filter;
    List<Metric> list;

    // Four metrics spanning two topologies, two components, two executors,
    // two hosts, two ports and four streams, so each filter dimension below
    // selects a distinct, known subset.
    Metric m1 = new Metric("metricType1", 50000000L, "Topo-m1", 1.0, "component-1", "executor-2", "hostname-1", "stream-1", 1, AggLevel.AGG_LEVEL_NONE);
    Metric m2 = new Metric("metricType2", 50030000L, "Topo-m1", 1.0, "component-1", "executor-1", "hostname-2", "stream-2", 1, AggLevel.AGG_LEVEL_NONE);
    Metric m3 = new Metric("metricType3", 50200000L, "Topo-m1", 1.0, "component-2", "executor-1", "hostname-1", "stream-3", 1, AggLevel.AGG_LEVEL_NONE);
    Metric m4 = new Metric("metricType4", 50200000L, "Topo-m2", 1.0, "component-2", "executor-1", "hostname-2", "stream-4", 2, AggLevel.AGG_LEVEL_NONE);
    store.insert(m1);
    store.insert(m2);
    store.insert(m3);
    store.insert(m4);
    waitForInsertFinish(m4);

    // validate search by time window (only m1 and m2 fall inside it)
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setStartTime(50000000L);
    filter.setEndTime(50130000L);
    list = getMetricsFromScan(filter);
    Assert.assertEquals(2, list.size());
    Assert.assertTrue(list.contains(m1));
    Assert.assertTrue(list.contains(m2));

    // validate search by topology id
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setTopologyId("Topo-m2");
    list = getMetricsFromScan(filter);
    Assert.assertEquals(1, list.size());
    Assert.assertTrue(list.contains(m4));

    // validate search by metric name
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setMetricName("metricType2");
    list = getMetricsFromScan(filter);
    Assert.assertEquals(1, list.size());
    Assert.assertTrue(list.contains(m2));

    // validate search by component id
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setComponentId("component-2");
    list = getMetricsFromScan(filter);
    Assert.assertEquals(2, list.size());
    Assert.assertTrue(list.contains(m3));
    Assert.assertTrue(list.contains(m4));

    // validate search by executor id
    // (the original test repeated this identical section twice; the duplicate
    // added no coverage and has been removed)
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setExecutorId("executor-1");
    list = getMetricsFromScan(filter);
    Assert.assertEquals(3, list.size());
    Assert.assertTrue(list.contains(m2));
    Assert.assertTrue(list.contains(m3));
    Assert.assertTrue(list.contains(m4));

    // validate search by host id
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setHostId("hostname-2");
    list = getMetricsFromScan(filter);
    Assert.assertEquals(2, list.size());
    Assert.assertTrue(list.contains(m2));
    Assert.assertTrue(list.contains(m4));

    // validate search by port
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setPort(1);
    list = getMetricsFromScan(filter);
    Assert.assertEquals(3, list.size());
    Assert.assertTrue(list.contains(m1));
    Assert.assertTrue(list.contains(m2));
    Assert.assertTrue(list.contains(m3));

    // validate search by stream id
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setStreamId("stream-4");
    list = getMetricsFromScan(filter);
    Assert.assertEquals(1, list.size());
    Assert.assertTrue(list.contains(m4));

    // validate 4 metrics (aggregations) found for m4 for all agg levels when
    // no agg-level filter is set and searching by port
    filter = new FilterOptions();
    filter.setPort(2);
    list = getMetricsFromScan(filter);
    Assert.assertEquals(4, list.size());
    Assert.assertTrue(list.contains(m4));
    Assert.assertFalse(list.contains(m1));
    Assert.assertFalse(list.contains(m2));
    Assert.assertFalse(list.contains(m3));

    // validate search by topology id and executor id combined
    filter = new FilterOptions();
    filter.addAggLevel(AggLevel.AGG_LEVEL_NONE);
    filter.setTopologyId("Topo-m1");
    filter.setExecutorId("executor-1");
    list = getMetricsFromScan(filter);
    Assert.assertEquals(2, list.size());
    Assert.assertTrue(list.contains(m2));
    Assert.assertTrue(list.contains(m3));
}
Use of org.apache.storm.metricstore.Metric in the Apache Storm project.
From class RocksDbStore, method scanInternal:
// perform a scan given filter options, and return results in either Metric or raw data.
/**
 * Performs a scan given filter options, and returns results in either Metric or raw form.
 *
 * @param filter       options limiting the scan (agg levels, time range, and optional
 *                     topology/metric/component/executor/host/port/stream filters)
 * @param scanCallback if non-null, each match is decoded into a {@code Metric} and reported here
 * @param rawCallback  used when {@code scanCallback} is null; receives raw key/value pairs and
 *                     may return false to stop the scan
 * @throws MetricException on database read failure
 */
private void scanInternal(FilterOptions filter, ScanCallback scanCallback, RocksDbScanCallback rawCallback) throws MetricException {
    Map<String, Integer> stringToIdCache = new HashMap<>();
    Map<Integer, String> idToStringCache = new HashMap<>();

    // For each filterable field, default to the full unsigned-int id range, and
    // narrow it to a single id when the filter specifies a value. If a filter
    // string is not present in the metadata store, no metric can match, so return.
    int startTopologyId = 0;
    int endTopologyId = 0xFFFFFFFF;
    String filterTopologyId = filter.getTopologyId();
    if (filterTopologyId != null) {
        int topologyId = lookupMetadataString(KeyType.TOPOLOGY_STRING, filterTopologyId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == topologyId) {
            // string does not exist in database
            return;
        }
        startTopologyId = topologyId;
        endTopologyId = topologyId;
    }

    long startTime = filter.getStartTime();
    long endTime = filter.getEndTime();

    int startMetricId = 0;
    int endMetricId = 0xFFFFFFFF;
    String filterMetricName = filter.getMetricName();
    if (filterMetricName != null) {
        int metricId = lookupMetadataString(KeyType.METRIC_STRING, filterMetricName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == metricId) {
            // string does not exist in database
            return;
        }
        startMetricId = metricId;
        endMetricId = metricId;
    }

    int startComponentId = 0;
    int endComponentId = 0xFFFFFFFF;
    String filterComponentId = filter.getComponentId();
    if (filterComponentId != null) {
        int componentId = lookupMetadataString(KeyType.COMPONENT_STRING, filterComponentId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == componentId) {
            // string does not exist in database
            return;
        }
        startComponentId = componentId;
        endComponentId = componentId;
    }

    int startExecutorId = 0;
    int endExecutorId = 0xFFFFFFFF;
    String filterExecutorName = filter.getExecutorId();
    if (filterExecutorName != null) {
        int executorId = lookupMetadataString(KeyType.EXEC_ID_STRING, filterExecutorName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == executorId) {
            // string does not exist in database
            return;
        }
        startExecutorId = executorId;
        endExecutorId = executorId;
    }

    int startHostId = 0;
    int endHostId = 0xFFFFFFFF;
    String filterHostId = filter.getHostId();
    if (filterHostId != null) {
        int hostId = lookupMetadataString(KeyType.HOST_STRING, filterHostId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == hostId) {
            // string does not exist in database
            return;
        }
        startHostId = hostId;
        endHostId = hostId;
    }

    int startPort = 0;
    int endPort = 0xFFFFFFFF;
    Integer filterPort = filter.getPort();
    if (filterPort != null) {
        startPort = filterPort;
        endPort = filterPort;
    }

    int startStreamId = 0;
    int endStreamId = 0xFFFFFFFF;
    String filterStreamId = filter.getStreamId();
    if (filterStreamId != null) {
        // FIX: stream ids live under STREAM_ID_STRING (matching the insert path and
        // the decode below); the previous code looked them up under HOST_STRING,
        // resolving stream filters against the wrong metadata namespace.
        int streamId = lookupMetadataString(KeyType.STREAM_ID_STRING, filterStreamId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == streamId) {
            // string does not exist in database
            return;
        }
        startStreamId = streamId;
        endStreamId = streamId;
    }

    try (ReadOptions ro = new ReadOptions()) {
        // total-order seek is required since metric keys are not prefix-grouped
        ro.setTotalOrderSeek(true);
        for (AggLevel aggLevel : filter.getAggLevels()) {
            RocksDbKey startKey = RocksDbKey.createMetricKey(aggLevel, startTopologyId, startTime, startMetricId, startComponentId, startExecutorId, startHostId, startPort, startStreamId);
            RocksDbKey endKey = RocksDbKey.createMetricKey(aggLevel, endTopologyId, endTime, endMetricId, endComponentId, endExecutorId, endHostId, endPort, endStreamId);
            try (RocksIterator iterator = db.newIterator(ro)) {
                for (iterator.seek(startKey.getRaw()); iterator.isValid(); iterator.next()) {
                    RocksDbKey key = new RocksDbKey(iterator.key());
                    if (key.compareTo(endKey) > 0) {
                        // past limit, quit
                        break;
                    }
                    // Keys between startKey and endKey may still mismatch individual
                    // fields (the key range is only a coarse bound), so re-check each
                    // filtered field; 0 means "field not filtered".
                    if (startTopologyId != 0 && key.getTopologyId() != startTopologyId) {
                        continue;
                    }
                    long timestamp = key.getTimestamp();
                    if (timestamp < startTime || timestamp > endTime) {
                        continue;
                    }
                    if (startMetricId != 0 && key.getMetricId() != startMetricId) {
                        continue;
                    }
                    if (startComponentId != 0 && key.getComponentId() != startComponentId) {
                        continue;
                    }
                    if (startExecutorId != 0 && key.getExecutorId() != startExecutorId) {
                        continue;
                    }
                    if (startHostId != 0 && key.getHostnameId() != startHostId) {
                        continue;
                    }
                    if (startPort != 0 && key.getPort() != startPort) {
                        continue;
                    }
                    if (startStreamId != 0 && key.getStreamId() != startStreamId) {
                        continue;
                    }
                    RocksDbValue val = new RocksDbValue(iterator.value());
                    if (scanCallback != null) {
                        try {
                            // decode the numeric ids back to strings and populate a Metric
                            String metricName = metadataIdToString(KeyType.METRIC_STRING, key.getMetricId(), idToStringCache);
                            String topologyId = metadataIdToString(KeyType.TOPOLOGY_STRING, key.getTopologyId(), idToStringCache);
                            String componentId = metadataIdToString(KeyType.COMPONENT_STRING, key.getComponentId(), idToStringCache);
                            String executorId = metadataIdToString(KeyType.EXEC_ID_STRING, key.getExecutorId(), idToStringCache);
                            String hostname = metadataIdToString(KeyType.HOST_STRING, key.getHostnameId(), idToStringCache);
                            String streamId = metadataIdToString(KeyType.STREAM_ID_STRING, key.getStreamId(), idToStringCache);
                            Metric metric = new Metric(metricName, timestamp, topologyId, 0.0, componentId, executorId, hostname, streamId, key.getPort(), aggLevel);
                            val.populateMetric(metric);
                            // callback to caller
                            scanCallback.cb(metric);
                        } catch (MetricException e) {
                            // a single undecodable metric should not abort the whole scan
                            LOG.warn("Failed to report found metric: {}", e.getMessage());
                        }
                    } else {
                        try {
                            if (!rawCallback.cb(key, val)) {
                                // caller asked to stop scanning
                                return;
                            }
                        } catch (RocksDBException e) {
                            throw new MetricException("Error reading metrics data", e);
                        }
                    }
                }
            }
        }
    }
}
Use of org.apache.storm.metricstore.Metric in the Apache Storm project.
From class RocksDbMetricsWriter, method processInsert:
/**
 * Performs the actual metric insert, and aggregates over all bucket times.
 *
 * <p>The raw metric is stored at {@code AGG_LEVEL_NONE}, then one aggregated copy is
 * stored per configured bucket, with its timestamp rounded down to the bucket boundary.
 *
 * @param metric Metric to store
 * @throws MetricException if database write fails
 */
private void processInsert(Metric metric) throws MetricException {
    // convert all strings to numeric Ids for the metric key and add to the metadata cache
    long metricTimestamp = metric.getTimestamp();
    Integer topologyId = storeMetadataString(KeyType.TOPOLOGY_STRING, metric.getTopologyId(), metricTimestamp);
    Integer metricId = storeMetadataString(KeyType.METRIC_STRING, metric.getMetricName(), metricTimestamp);
    Integer componentId = storeMetadataString(KeyType.COMPONENT_STRING, metric.getComponentId(), metricTimestamp);
    Integer executorId = storeMetadataString(KeyType.EXEC_ID_STRING, metric.getExecutorId(), metricTimestamp);
    Integer hostId = storeMetadataString(KeyType.HOST_STRING, metric.getHostname(), metricTimestamp);
    Integer streamId = storeMetadataString(KeyType.STREAM_ID_STRING, metric.getStreamId(), metricTimestamp);

    RocksDbKey key = RocksDbKey.createMetricKey(AggLevel.AGG_LEVEL_NONE, topologyId, metric.getTimestamp(), metricId, componentId, executorId, hostId, metric.getPort(), streamId);

    // save metric key/value to be batched
    RocksDbValue value = new RocksDbValue(metric);
    insertBatch.put(key, value);

    // Aggregate matching metrics over bucket timeframes.
    // We'll process starting with the longest bucket. If the metric for this does not exist, we don't have to
    // search for the remaining bucket metrics.
    // (FIX: use a parameterized ListIterator instead of the raw type + unchecked cast.)
    ListIterator<AggLevel> li = aggBuckets.listIterator(aggBuckets.size());
    boolean populate = true;
    while (li.hasPrevious()) {
        AggLevel bucket = li.previous();
        Metric aggMetric = new Metric(metric);
        aggMetric.setAggLevel(bucket);

        // round the timestamp down to the start of this bucket
        long msToBucket = 1000L * 60L * bucket.getValue();
        long roundedToBucket = msToBucket * (metric.getTimestamp() / msToBucket);
        aggMetric.setTimestamp(roundedToBucket);

        RocksDbKey aggKey = RocksDbKey.createMetricKey(bucket, topologyId, aggMetric.getTimestamp(), metricId, componentId, executorId, hostId, aggMetric.getPort(), streamId);

        if (populate) {
            // retrieve any existing aggregation matching this one and update the values
            if (store.populateFromKey(aggKey, aggMetric)) {
                aggMetric.addValue(metric.getValue());
            } else {
                // aggregating metric did not exist, don't look for further ones with smaller timestamps
                populate = false;
            }
        }

        // save metric key/value to be batched
        RocksDbValue aggVal = new RocksDbValue(aggMetric);
        insertBatch.put(aggKey, aggVal);
    }

    processBatchInsert(insertBatch);
    insertBatch.clear();
}
Use of org.apache.storm.metricstore.Metric in the Apache Storm project.
From class RocksDbMetricsWriter, method run:
/**
 * Worker loop: blocks on the metrics queue and inserts each received metric
 * into RocksDB until shutdown is requested.
 */
@Override
public void run() {
    while (!shutdown) {
        try {
            processInsert((Metric) queue.take());
        } catch (Exception e) {
            // Log and keep consuming — a single bad metric must not kill the writer thread.
            LOG.error("Failed to insert metric", e);
            if (failureMeter != null) {
                failureMeter.mark();
            }
        }
    }
}
Aggregations