Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbStore, method prepare.
/**
* Create metric store instance using the configurations provided via the config map.
*
* @param config Storm config map
* @param metricsRegistry The Nimbus daemon metrics registry
* @throws MetricException on preparation error
*/
@Override
public void prepare(Map<String, Object> config, StormMetricsRegistry metricsRegistry) throws MetricException {
    validateConfig(config);
    this.failureMeter = metricsRegistry.registerMeter("RocksDB:metric-failures");
    RocksDB.loadLibrary();
    boolean createIfMissing = ObjectReader.getBoolean(config.get(DaemonConfig.STORM_ROCKSDB_CREATE_IF_MISSING), false);

    try (Options options = new Options().setCreateIfMissing(createIfMissing)) {
        // use the hash index for prefix searches
        BlockBasedTableConfig tfc = new BlockBasedTableConfig();
        tfc.setIndexType(IndexType.kHashSearch);
        options.setTableFormatConfig(tfc);
        options.useCappedPrefixExtractor(RocksDbKey.KEY_SIZE);

        String path = getRocksDbAbsoluteDir(config);
        LOG.info("Opening RocksDB from {}", path);
        db = RocksDB.open(options, path);
    } catch (RocksDBException e) {
        String message = "Error opening RocksDB database";
        LOG.error(message, e);
        throw new MetricException(message, e);
    }

    // create the thread that deletes old metrics and metadata
    int retentionHours = Integer.parseInt(config.get(DaemonConfig.STORM_ROCKSDB_METRIC_RETENTION_HOURS).toString());
    int deletionPeriod = 0;
    if (config.containsKey(DaemonConfig.STORM_ROCKSDB_METRIC_DELETION_PERIOD_HOURS)) {
        deletionPeriod = Integer.parseInt(config.get(DaemonConfig.STORM_ROCKSDB_METRIC_DELETION_PERIOD_HOURS).toString());
    }
    metricsCleaner = new MetricsCleaner(this, retentionHours, deletionPeriod, failureMeter, metricsRegistry);

    // create the thread that processes insertion of all metrics
    metricsWriter = new RocksDbMetricsWriter(this, this.queue, this.failureMeter);
    int cacheCapacity = Integer.parseInt(config.get(DaemonConfig.STORM_ROCKSDB_METADATA_STRING_CACHE_CAPACITY).toString());
    StringMetadataCache.init(metricsWriter, cacheCapacity);
    readOnlyStringMetadataCache = StringMetadataCache.getReadOnlyStringMetadataCache();
    // init the writer once the cache is set up
    metricsWriter.init();

    // start the threads after the metadata cache is created
    Thread thread = new Thread(metricsCleaner, "RocksDbMetricsCleaner");
    thread.setDaemon(true);
    thread.start();
    thread = new Thread(metricsWriter, "RocksDbMetricsWriter");
    thread.setDaemon(true);
    thread.start();
}
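For reference, a minimal sketch of driving prepare directly. The DaemonConfig keys are real Storm settings, but the path and sizing values below are made-up placeholders, and the sketch assumes validateConfig is satisfied by these keys alone:

RocksDbStore store = new RocksDbStore();
Map<String, Object> config = new HashMap<>();
config.put(DaemonConfig.STORM_ROCKSDB_LOCATION, "/tmp/storm-rocksdb");        // hypothetical path
config.put(DaemonConfig.STORM_ROCKSDB_CREATE_IF_MISSING, true);
config.put(DaemonConfig.STORM_ROCKSDB_METRIC_RETENTION_HOURS, 240);           // placeholder retention
config.put(DaemonConfig.STORM_ROCKSDB_METADATA_STRING_CACHE_CAPACITY, 4000);  // placeholder capacity
store.prepare(config, new StormMetricsRegistry());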
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbStore, method metadataIdToString.
// Finds the metadata string that matches the string Id and type provided. The string should exist, as it is
// referenced from a metric.
private String metadataIdToString(KeyType type, int id, Map<Integer, String> lookupCache) throws MetricException {
    String s = readOnlyStringMetadataCache.getMetadataString(id);
    if (s != null) {
        return s;
    }

    s = lookupCache.get(id);
    if (s != null) {
        return s;
    }

    // get from the database and add to the lookup cache
    RocksDbKey key = new RocksDbKey(type, id);
    try {
        byte[] value = db.get(key.getRaw());
        if (value == null) {
            throw new MetricException("Failed to find metadata string for id " + id + " of type " + type);
        }
        RocksDbValue rdbValue = new RocksDbValue(value);
        s = rdbValue.getMetdataString();
        lookupCache.put(id, s);
        return s;
    } catch (RocksDBException e) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("Failed to get from RocksDb", e);
    }
}
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbStore, method deleteMetadataBefore.
// deletes metadata strings before the provided timestamp
void deleteMetadataBefore(long firstValidTimestamp) throws MetricException {
    if (firstValidTimestamp < 1L) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("Invalid timestamp for deleting metadata: " + firstValidTimestamp);
    }

    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions()) {
        // search all metadata strings
        RocksDbKey topologyMetadataPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_START);
        RocksDbKey lastPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_END);
        try {
            scanRange(topologyMetadataPrefix, lastPrefix, (key, value) -> {
                // assume the metadata was recently used if it is still in the cache
                if (!readOnlyStringMetadataCache.contains(key.getMetadataStringId())
                        && value.getLastTimestamp() < firstValidTimestamp) {
                    writeBatch.delete(key.getRaw());
                }
                return true;
            });
        } catch (RocksDBException e) {
            throw new MetricException("Error reading metric data", e);
        }

        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metadata strings", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed to delete metadata strings";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
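The cutoff is an epoch timestamp in milliseconds, assuming the same units as getLastTimestamp above. A minimal sketch of how a caller in the same package might derive it from the retention window (retentionHours is assumed from the prepare code; TimeUnit is java.util.concurrent.TimeUnit):

// drop metadata strings last used before the retention window
long cutoff = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(retentionHours);
store.deleteMetadataBefore(cutoff);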
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbStore, method lookupMetadataString.
// Attempts to look up the unique id for a string that may not exist yet. Returns INVALID_METADATA_STRING_ID
// if it does not exist.
private int lookupMetadataString(KeyType type, String s, Map<String, Integer> lookupCache) throws MetricException {
    if (s == null) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("No string for metric metadata string type " + type);
    }

    // attempt to find it in the string cache; this will update the LRU
    StringMetadata stringMetadata = readOnlyStringMetadataCache.get(s);
    if (stringMetadata != null) {
        return stringMetadata.getStringId();
    }

    // attempt to find it in the caller's cache
    Integer id = lookupCache.get(s);
    if (id != null) {
        return id;
    }

    // attempt to find the string in the database
    try {
        stringMetadata = rocksDbGetStringMetadata(type, s);
    } catch (RocksDBException e) {
        throw new MetricException("Error reading metric data", e);
    }
    if (stringMetadata != null) {
        id = stringMetadata.getStringId();
        // Add to the caller's cache. We can't add to the stringMetadataCache, since that could cause an
        // eviction and a database write, which must only occur from the inserting DB thread.
        lookupCache.put(s, id);
        return id;
    }

    // string does not exist
    return INVALID_METADATA_STRING_ID;
}
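A sketch of the intended calling pattern: each scan allocates its own throwaway map, so database misses populate the local cache rather than the shared LRU (the metric name here is only an example):

// per-scan cache: safe to fill from any thread, discarded when the scan ends
Map<String, Integer> stringToIdCache = new HashMap<>();
int metricId = lookupMetadataString(KeyType.METRIC_STRING, "__emit-count", stringToIdCache);
if (metricId == INVALID_METADATA_STRING_ID) {
    return;  // nothing stored under that name yet
}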
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbStore, method scanInternal.
// Performs a scan given the filter options, returning results either as populated Metric objects or as raw key/value data.
private void scanInternal(FilterOptions filter, ScanCallback scanCallback, RocksDbScanCallback rawCallback) throws MetricException {
    Map<String, Integer> stringToIdCache = new HashMap<>();
    Map<Integer, String> idToStringCache = new HashMap<>();

    int startTopologyId = 0;
    int endTopologyId = 0xFFFFFFFF;
    String filterTopologyId = filter.getTopologyId();
    if (filterTopologyId != null) {
        int topologyId = lookupMetadataString(KeyType.TOPOLOGY_STRING, filterTopologyId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == topologyId) {
            // string does not exist in database
            return;
        }
        startTopologyId = topologyId;
        endTopologyId = topologyId;
    }

    long startTime = filter.getStartTime();
    long endTime = filter.getEndTime();

    int startMetricId = 0;
    int endMetricId = 0xFFFFFFFF;
    String filterMetricName = filter.getMetricName();
    if (filterMetricName != null) {
        int metricId = lookupMetadataString(KeyType.METRIC_STRING, filterMetricName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == metricId) {
            // string does not exist in database
            return;
        }
        startMetricId = metricId;
        endMetricId = metricId;
    }

    int startComponentId = 0;
    int endComponentId = 0xFFFFFFFF;
    String filterComponentId = filter.getComponentId();
    if (filterComponentId != null) {
        int componentId = lookupMetadataString(KeyType.COMPONENT_STRING, filterComponentId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == componentId) {
            // string does not exist in database
            return;
        }
        startComponentId = componentId;
        endComponentId = componentId;
    }

    int startExecutorId = 0;
    int endExecutorId = 0xFFFFFFFF;
    String filterExecutorName = filter.getExecutorId();
    if (filterExecutorName != null) {
        int executorId = lookupMetadataString(KeyType.EXEC_ID_STRING, filterExecutorName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == executorId) {
            // string does not exist in database
            return;
        }
        startExecutorId = executorId;
        endExecutorId = executorId;
    }

    int startHostId = 0;
    int endHostId = 0xFFFFFFFF;
    String filterHostId = filter.getHostId();
    if (filterHostId != null) {
        int hostId = lookupMetadataString(KeyType.HOST_STRING, filterHostId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == hostId) {
            // string does not exist in database
            return;
        }
        startHostId = hostId;
        endHostId = hostId;
    }

    int startPort = 0;
    int endPort = 0xFFFFFFFF;
    Integer filterPort = filter.getPort();
    if (filterPort != null) {
        startPort = filterPort;
        endPort = filterPort;
    }

    int startStreamId = 0;
    int endStreamId = 0xFFFFFFFF;
    String filterStreamId = filter.getStreamId();
    if (filterStreamId != null) {
        int streamId = lookupMetadataString(KeyType.STREAM_ID_STRING, filterStreamId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == streamId) {
            // string does not exist in database
            return;
        }
        startStreamId = streamId;
        endStreamId = streamId;
    }

    try (ReadOptions ro = new ReadOptions()) {
        ro.setTotalOrderSeek(true);
        for (AggLevel aggLevel : filter.getAggLevels()) {
            RocksDbKey startKey = RocksDbKey.createMetricKey(aggLevel, startTopologyId, startTime, startMetricId,
                    startComponentId, startExecutorId, startHostId, startPort, startStreamId);
            RocksDbKey endKey = RocksDbKey.createMetricKey(aggLevel, endTopologyId, endTime, endMetricId,
                    endComponentId, endExecutorId, endHostId, endPort, endStreamId);

            try (RocksIterator iterator = db.newIterator(ro)) {
                for (iterator.seek(startKey.getRaw()); iterator.isValid(); iterator.next()) {
                    RocksDbKey key = new RocksDbKey(iterator.key());
                    if (key.compareTo(endKey) > 0) {
                        // past the limit, quit
                        break;
                    }

                    if (startTopologyId != 0 && key.getTopologyId() != startTopologyId) {
                        continue;
                    }
                    long timestamp = key.getTimestamp();
                    if (timestamp < startTime || timestamp > endTime) {
                        continue;
                    }
                    if (startMetricId != 0 && key.getMetricId() != startMetricId) {
                        continue;
                    }
                    if (startComponentId != 0 && key.getComponentId() != startComponentId) {
                        continue;
                    }
                    if (startExecutorId != 0 && key.getExecutorId() != startExecutorId) {
                        continue;
                    }
                    if (startHostId != 0 && key.getHostnameId() != startHostId) {
                        continue;
                    }
                    if (startPort != 0 && key.getPort() != startPort) {
                        continue;
                    }
                    if (startStreamId != 0 && key.getStreamId() != startStreamId) {
                        continue;
                    }

                    RocksDbValue val = new RocksDbValue(iterator.value());
                    if (scanCallback != null) {
                        try {
                            // populate a metric
                            String metricName = metadataIdToString(KeyType.METRIC_STRING, key.getMetricId(), idToStringCache);
                            String topologyId = metadataIdToString(KeyType.TOPOLOGY_STRING, key.getTopologyId(), idToStringCache);
                            String componentId = metadataIdToString(KeyType.COMPONENT_STRING, key.getComponentId(), idToStringCache);
                            String executorId = metadataIdToString(KeyType.EXEC_ID_STRING, key.getExecutorId(), idToStringCache);
                            String hostname = metadataIdToString(KeyType.HOST_STRING, key.getHostnameId(), idToStringCache);
                            String streamId = metadataIdToString(KeyType.STREAM_ID_STRING, key.getStreamId(), idToStringCache);
                            Metric metric = new Metric(metricName, timestamp, topologyId, 0.0, componentId, executorId,
                                    hostname, streamId, key.getPort(), aggLevel);
                            val.populateMetric(metric);
                            // callback to the caller
                            scanCallback.cb(metric);
                        } catch (MetricException e) {
                            LOG.warn("Failed to report found metric: {}", e.getMessage());
                        }
                    } else {
                        try {
                            if (!rawCallback.cb(key, val)) {
                                return;
                            }
                        } catch (RocksDBException e) {
                            throw new MetricException("Error reading metrics data", e);
                        }
                    }
                }
            }
        }
    }
}
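scanInternal is reached through the store's public scan API. A minimal sketch of a caller, assuming FilterOptions exposes setter counterparts to the getters used above (setTopologyId, setMetricName, setStartTime, addAggLevel); treat those setter names and the sample values as assumptions:

FilterOptions filter = new FilterOptions();
filter.setTopologyId("topo-1-1700000000");                     // hypothetical topology id
filter.setMetricName("__emit-count");                          // hypothetical metric name
filter.setStartTime(System.currentTimeMillis() - 3_600_000L);  // last hour only
filter.addAggLevel(AggLevel.AGG_LEVEL_10_MIN);
store.scan(filter, metric -> LOG.info("matched metric: {}", metric));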