Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class Container, method processMetrics.
/**
 * Send worker metrics to Nimbus.
 */
void processMetrics(OnlyLatestExecutor<Integer> exec, WorkerMetricsProcessor processor) {
    try {
        Optional<Long> usedMemoryForPort = containerMemoryTracker.getUsedMemoryMb(port);
        if (usedMemoryForPort.isPresent()) {
            // Make sure we don't process too frequently.
            long nextMetricProcessTime = this.lastMetricProcessTime + 60L * 1000L;
            long currentTimeMsec = System.currentTimeMillis();
            if (currentTimeMsec < nextMetricProcessTime) {
                return;
            }
            String hostname = Utils.hostname();
            // create metric for memory
            long timestamp = System.currentTimeMillis();
            WorkerMetricPoint workerMetric =
                new WorkerMetricPoint(MEMORY_USED_METRIC, timestamp, usedMemoryForPort.get(),
                    SYSTEM_COMPONENT_ID, INVALID_EXECUTOR_ID, INVALID_STREAM_ID);
            WorkerMetricList metricList = new WorkerMetricList();
            metricList.add_to_metrics(workerMetric);
            WorkerMetrics metrics = new WorkerMetrics(topologyId, port, hostname, metricList);
            exec.execute(port, () -> {
                try {
                    processor.processWorkerMetrics(conf, metrics);
                } catch (MetricException e) {
                    LOG.error("Failed to process metrics", e);
                }
            });
        }
    } catch (Exception e) {
        LOG.error("Failed to process metrics", e);
    } finally {
        this.lastMetricProcessTime = System.currentTimeMillis();
    }
}
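The early-return guard together with the finally block acts as a once-per-minute throttle on metric reporting. A minimal standalone sketch of that pattern follows; MetricThrottle and maybeRun are hypothetical names for illustration, not Storm APIs.

import java.util.function.LongSupplier;

// Illustrative once-per-interval throttle; not a Storm class.
public class MetricThrottle {
    private final long intervalMs;
    private final LongSupplier clock;   // injectable clock, e.g. System::currentTimeMillis
    private long lastRunMs = 0L;

    public MetricThrottle(long intervalMs, LongSupplier clock) {
        this.intervalMs = intervalMs;
        this.clock = clock;
    }

    // Runs the action only if the interval has elapsed; the finally block always refreshes
    // the timestamp, mirroring how processMetrics() updates lastMetricProcessTime.
    public void maybeRun(Runnable action) {
        try {
            if (clock.getAsLong() < lastRunMs + intervalMs) {
                return;
            }
            action.run();
        } finally {
            lastRunMs = clock.getAsLong();
        }
    }

    public static void main(String[] args) {
        MetricThrottle throttle = new MetricThrottle(60_000L, System::currentTimeMillis);
        throttle.maybeRun(() -> System.out.println("metrics reported"));
        throttle.maybeRun(() -> System.out.println("not printed: still within the same minute"));
    }
}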
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbStore, method populateFromKey.
// populate metric values using the provided key
boolean populateFromKey(RocksDbKey key, Metric metric) throws MetricException {
    try {
        byte[] value = db.get(key.getRaw());
        if (value == null) {
            return false;
        }
        RocksDbValue rdbValue = new RocksDbValue(value);
        rdbValue.populateMetric(metric);
    } catch (Exception e) {
        String message = "Failed to populate metric";
        LOG.error(message, e);
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException(message, e);
    }
    return true;
}
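The method distinguishes "key not present" (return false) from "read failed" (wrap the underlying error in a MetricException). A small sketch of the same lookup-and-wrap pattern against the plain RocksDB Java API; LookupException and tryRead are made-up names used only for illustration.

import java.util.Optional;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class RocksLookup {
    // Hypothetical stand-in for a domain exception such as MetricException.
    static class LookupException extends Exception {
        LookupException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    // Empty Optional means the key is absent; an exception means the read itself failed.
    static Optional<byte[]> tryRead(RocksDB db, byte[] rawKey) throws LookupException {
        try {
            return Optional.ofNullable(db.get(rawKey));
        } catch (RocksDBException e) {
            throw new LookupException("Failed to read key from RocksDB", e);
        }
    }
}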
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbMetricsWriter, method close.
@Override
public void close() {
    this.shutdown = true;
    // get all metadata from the cache to put into the database
    // use a new map to prevent threading issues with writer thread
    TreeMap<RocksDbKey, RocksDbValue> batchMap = new TreeMap<>();
    for (Map.Entry entry : stringMetadataCache.entrySet()) {
        String metadataString = (String) entry.getKey();
        StringMetadata val = (StringMetadata) entry.getValue();
        RocksDbValue rval = new RocksDbValue(val.getLastTimestamp(), metadataString);
        for (KeyType type : val.getMetadataTypes()) {
            // save the metadata for all types of strings it matches
            RocksDbKey rkey = new RocksDbKey(type, val.getStringId());
            batchMap.put(rkey, rval);
        }
    }
    try {
        processBatchInsert(batchMap);
    } catch (MetricException e) {
        LOG.error("Failed to insert all metadata", e);
    }
    // flush db to disk
    try (FlushOptions flushOps = new FlushOptions()) {
        flushOps.setWaitForFlush(true);
        store.db.flush(flushOps);
    } catch (RocksDBException e) {
        LOG.error("Failed to flush RocksDB", e);
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
    }
}
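close() drains the metadata cache into a batch write, then forces RocksDB to flush its memtables with setWaitForFlush(true) so the data is on disk before the process exits. A self-contained sketch of that flush pattern using the RocksDB Java API; the database path is hypothetical.

import org.rocksdb.FlushOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public final class FlushExample {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/flush-example-db")) {   // hypothetical path
            db.put("k".getBytes(), "v".getBytes());
            // Block until the memtable is persisted, as the close() method above does.
            try (FlushOptions flushOps = new FlushOptions().setWaitForFlush(true)) {
                db.flush(flushOps);
            }
        }
    }
}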
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class RocksDbMetricsWriter, method generateUniqueStringIds.
// guarantees a list of unused string Ids exists. Once the list is empty, creates a new list
// by generating a list of random numbers and removing the ones that already are in use.
private void generateUniqueStringIds() throws MetricException {
    int attempts = 0;
    while (unusedIds.isEmpty()) {
        attempts++;
        if (attempts > 100) {
            String message = "Failed to generate unique ids";
            LOG.error(message);
            throw new MetricException(message);
        }
        for (int i = 0; i < 600; i++) {
            int n = ThreadLocalRandom.current().nextInt();
            if (n == RocksDbStore.INVALID_METADATA_STRING_ID) {
                continue;
            }
            // skip any ids already present in the cache
            if (stringMetadataCache.contains(n)) {
                continue;
            }
            unusedIds.add(n);
        }
        // now scan all metadata and remove any matching string Ids from this list
        RocksDbKey firstPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_START);
        RocksDbKey lastPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_END);
        try {
            store.scanRange(firstPrefix, lastPrefix, (key, value) -> {
                unusedIds.remove(key.getMetadataStringId());
                // process all metadata
                return true;
            });
        } catch (RocksDBException e) {
            throw new MetricException("Error reading metrics data", e);
        }
    }
}
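generateUniqueStringIds() keeps a pool of candidate ids: it draws random integers, discards the reserved sentinel and anything already in the cache, then removes ids already stored on disk. A standalone sketch of the same generate-then-filter idea, substituting an in-memory set of used ids for the RocksDB scan; all names here are illustrative.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

final class IdPool {
    static final int INVALID_ID = 0;   // hypothetical reserved sentinel

    // Returns a batch of candidate ids that avoid the sentinel and anything already in use.
    static Set<Integer> generateUnusedIds(Set<Integer> usedIds, int batchSize, int maxAttempts) {
        Set<Integer> unused = new HashSet<>();
        int attempts = 0;
        while (unused.isEmpty()) {
            if (++attempts > maxAttempts) {
                throw new IllegalStateException("Failed to generate unique ids");
            }
            for (int i = 0; i < batchSize; i++) {
                int n = ThreadLocalRandom.current().nextInt();
                if (n != INVALID_ID && !usedIds.contains(n)) {
                    unused.add(n);
                }
            }
            unused.removeAll(usedIds);   // mirrors the scan over stored metadata
        }
        return unused;
    }
}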
Use of org.apache.storm.metricstore.MetricException in project storm by apache.
The class StringMetadataCache, method put.
/**
 * Add the string metadata to the cache.
 *
 * <p>NOTE: this can cause data to be evicted from the cache when full. When this occurs, the evictionCallback() method
 * is called to store the metadata back into the RocksDB database.
 *
 * <p>This method is only exposed to the WritableStringMetadataCache interface.
 *
 * @param s              The string to add
 * @param stringMetadata The string's metadata
 * @param newEntry       Indicates the metadata is being used for the first time and should be written to RocksDB immediately
 * @throws MetricException when evicted data fails to save to the database or when the database is shut down
 */
@Override
public void put(String s, StringMetadata stringMetadata, boolean newEntry) throws MetricException {
    if (dbWriter.isShutdown()) {
        // another thread could be writing out the metadata cache to the database.
        throw new MetricException("Shutting down");
    }
    try {
        if (newEntry) {
            writeMetadataToDisk(s, stringMetadata);
        }
        lruStringCache.put(s, stringMetadata);
        hashToString.put(stringMetadata.getStringId(), s);
    } catch (Exception e) {
        // catch any runtime exceptions caused by eviction
        throw new MetricException("Failed to save string in metadata cache", e);
    }
}
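put() is a write-through LRU insert: brand-new metadata is persisted immediately, and entries pushed out by the LRU policy are persisted via the eviction callback. A compact sketch of an LRU map with an eviction hook built on java.util.LinkedHashMap; this only illustrates the idea and is not Storm's StringMetadataCache.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.BiConsumer;

// LRU map that invokes a callback whenever the eldest entry is evicted.
final class EvictingLruCache<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;
    private final BiConsumer<K, V> evictionCallback;

    EvictingLruCache(int capacity, BiConsumer<K, V> evictionCallback) {
        super(16, 0.75f, true);   // access order, so get() refreshes recency
        this.capacity = capacity;
        this.evictionCallback = evictionCallback;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        if (size() > capacity) {
            evictionCallback.accept(eldest.getKey(), eldest.getValue());   // e.g. write back to storage
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        EvictingLruCache<String, Integer> cache =
            new EvictingLruCache<>(2, (k, v) -> System.out.println("evicted " + k + "=" + v));
        cache.put("a", 1);
        cache.put("b", 2);
        cache.put("c", 3);   // evicts "a" and triggers the callback
    }
}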