
Example 46 with RocksDBException

Use of org.rocksdb.RocksDBException in project storm by apache.

The class RocksDbStore, method metadataIdToString.

// Finds the metadata string that matches the string Id and type provided.  The string should exist, as it is
// referenced from a metric.
private String metadataIdToString(KeyType type, int id, Map<Integer, String> lookupCache) throws MetricException {
    String s = readOnlyStringMetadataCache.getMetadataString(id);
    if (s != null) {
        return s;
    }
    s = lookupCache.get(id);
    if (s != null) {
        return s;
    }
    // get from DB and add to lookup cache
    RocksDbKey key = new RocksDbKey(type, id);
    try {
        byte[] value = db.get(key.getRaw());
        if (value == null) {
            throw new MetricException("Failed to find metadata string for id " + id + " of type " + type);
        }
        RocksDbValue rdbValue = new RocksDbValue(value);
        s = rdbValue.getMetdataString();
        lookupCache.put(id, s);
        return s;
    } catch (RocksDBException e) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("Failed to get from RocksDb", e);
    }
}
Also used : RocksDBException(org.rocksdb.RocksDBException) MetricException(org.apache.storm.metricstore.MetricException)
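
For reference, a minimal standalone sketch of the same cache-then-database lookup pattern written against the plain RocksDB Java API (not the Storm classes above); the key encoding, value parsing, and class names here are simplified placeholders.

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

class IdToStringLookup {
    private final RocksDB db;
    // Stands in for the shared readOnlyStringMetadataCache in the Storm code.
    private final Map<Integer, String> sharedCache = new HashMap<>();

    IdToStringLookup(RocksDB db) {
        this.db = db;
    }

    String lookup(int id, Map<Integer, String> perCallCache) throws RocksDBException {
        String s = sharedCache.get(id);                // 1. shared cache
        if (s != null) {
            return s;
        }
        s = perCallCache.get(id);                      // 2. caller-local cache
        if (s != null) {
            return s;
        }
        byte[] key = Integer.toString(id).getBytes(StandardCharsets.UTF_8);  // placeholder key encoding
        byte[] value = db.get(key);                    // 3. RocksDB read; may throw RocksDBException
        if (value == null) {
            throw new IllegalStateException("No metadata string for id " + id);
        }
        s = new String(value, StandardCharsets.UTF_8);
        perCallCache.put(id, s);                       // only the per-call cache is updated here
        return s;
    }
}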

Example 47 with RocksDBException

Use of org.rocksdb.RocksDBException in project storm by apache.

The class RocksDbStore, method deleteMetadataBefore.

// deletes metadata strings before the provided timestamp
void deleteMetadataBefore(long firstValidTimestamp) throws MetricException {
    if (firstValidTimestamp < 1L) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("Invalid timestamp for deleting metadata: " + firstValidTimestamp);
    }
    try (WriteBatch writeBatch = new WriteBatch();
        WriteOptions writeOps = new WriteOptions()) {
        // search all metadata strings
        RocksDbKey topologyMetadataPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_START);
        RocksDbKey lastPrefix = RocksDbKey.getPrefix(KeyType.METADATA_STRING_END);
        try {
            scanRange(topologyMetadataPrefix, lastPrefix, (key, value) -> {
                // we'll assume the metadata was recently used if still in the cache.
                if (!readOnlyStringMetadataCache.contains(key.getMetadataStringId())) {
                    if (value.getLastTimestamp() < firstValidTimestamp) {
                        writeBatch.delete(key.getRaw());
                    }
                }
                return true;
            });
        } catch (RocksDBException e) {
            throw new MetricException("Error reading metric data", e);
        }
        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metadata strings", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed delete metadata strings";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
Also used : WriteOptions(org.rocksdb.WriteOptions) RocksDBException(org.rocksdb.RocksDBException) WriteBatch(org.rocksdb.WriteBatch) MetricException(org.apache.storm.metricstore.MetricException)
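
A minimal standalone sketch of the batched-delete pattern used above, written against the plain RocksDB Java API: queue deletes in a WriteBatch while iterating a key range, then commit them all with a single db.write() call. The prefix encoding and helper names are illustrative, not Storm code.

import java.nio.charset.StandardCharsets;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

class PrefixDeleter {
    // Deletes every key that starts with the given prefix; the string key encoding is a placeholder.
    static void deleteByPrefix(RocksDB db, String prefix) throws RocksDBException {
        byte[] prefixBytes = prefix.getBytes(StandardCharsets.UTF_8);
        try (WriteBatch batch = new WriteBatch();
             WriteOptions writeOpts = new WriteOptions();
             RocksIterator it = db.newIterator()) {
            for (it.seek(prefixBytes); it.isValid(); it.next()) {
                byte[] key = it.key();
                if (!startsWith(key, prefixBytes)) {
                    break;                       // past the prefix range, stop scanning
                }
                batch.delete(key);               // queue the delete; nothing is written yet
            }
            if (batch.count() > 0) {
                db.write(writeOpts, batch);      // apply all queued deletes in one write
            }
        }
    }

    private static boolean startsWith(byte[] key, byte[] prefix) {
        if (key.length < prefix.length) {
            return false;
        }
        for (int i = 0; i < prefix.length; i++) {
            if (key[i] != prefix[i]) {
                return false;
            }
        }
        return true;
    }
}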

Example 48 with RocksDBException

Use of org.rocksdb.RocksDBException in project storm by apache.

The class RocksDbStore, method lookupMetadataString.

// attempts to lookup the unique Id for a string that may not exist yet.  Returns INVALID_METADATA_STRING_ID
// if it does not exist.
private int lookupMetadataString(KeyType type, String s, Map<String, Integer> lookupCache) throws MetricException {
    if (s == null) {
        if (this.failureMeter != null) {
            this.failureMeter.mark();
        }
        throw new MetricException("No string for metric metadata string type " + type);
    }
    // attempt to find it in the string cache, this will update the LRU
    StringMetadata stringMetadata = readOnlyStringMetadataCache.get(s);
    if (stringMetadata != null) {
        return stringMetadata.getStringId();
    }
    // attempt to find it in callers cache
    Integer id = lookupCache.get(s);
    if (id != null) {
        return id;
    }
    // attempt to find the string in the database
    try {
        stringMetadata = rocksDbGetStringMetadata(type, s);
    } catch (RocksDBException e) {
        throw new MetricException("Error reading metric data", e);
    }
    if (stringMetadata != null) {
        id = stringMetadata.getStringId();
        // add to the callers cache.  We can't add it to the stringMetadataCache, since that could cause an eviction
        // database write, which we want to only occur from the inserting DB thread.
        lookupCache.put(s, id);
        return id;
    }
    // string does not exist
    return INVALID_METADATA_STRING_ID;
}
Also used : RocksDBException(org.rocksdb.RocksDBException) MetricException(org.apache.storm.metricstore.MetricException)
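
A minimal standalone sketch of the lookup contract this method depends on: RocksDB.get() returns null for a missing key (the normal "does not exist yet" case) and throws RocksDBException only on a real database error. Names, the sentinel value, and the key encoding below are placeholders, not Storm code.

import java.nio.charset.StandardCharsets;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

class OptionalLookup {
    static final int INVALID_ID = 0;    // stands in for INVALID_METADATA_STRING_ID

    static int lookupId(RocksDB db, String s) {
        byte[] key = s.getBytes(StandardCharsets.UTF_8);   // placeholder key encoding
        try {
            byte[] value = db.get(key);
            if (value == null) {
                return INVALID_ID;                         // absent key: an expected, non-error outcome
            }
            return Integer.parseInt(new String(value, StandardCharsets.UTF_8));
        } catch (RocksDBException e) {
            throw new RuntimeException("Error reading metric data", e);   // real database failure
        }
    }
}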

Example 49 with RocksDBException

Use of org.rocksdb.RocksDBException in project storm by apache.

The class RocksDbStore, method scanInternal.

// perform a scan given filter options, and return results in either Metric or raw data.
private void scanInternal(FilterOptions filter, ScanCallback scanCallback, RocksDbScanCallback rawCallback) throws MetricException {
    Map<String, Integer> stringToIdCache = new HashMap<>();
    Map<Integer, String> idToStringCache = new HashMap<>();
    int startTopologyId = 0;
    int endTopologyId = 0xFFFFFFFF;
    String filterTopologyId = filter.getTopologyId();
    if (filterTopologyId != null) {
        int topologyId = lookupMetadataString(KeyType.TOPOLOGY_STRING, filterTopologyId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == topologyId) {
            // string does not exist in database
            return;
        }
        startTopologyId = topologyId;
        endTopologyId = topologyId;
    }
    long startTime = filter.getStartTime();
    long endTime = filter.getEndTime();
    int startMetricId = 0;
    int endMetricId = 0xFFFFFFFF;
    String filterMetricName = filter.getMetricName();
    if (filterMetricName != null) {
        int metricId = lookupMetadataString(KeyType.METRIC_STRING, filterMetricName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == metricId) {
            // string does not exist in database
            return;
        }
        startMetricId = metricId;
        endMetricId = metricId;
    }
    int startComponentId = 0;
    int endComponentId = 0xFFFFFFFF;
    String filterComponentId = filter.getComponentId();
    if (filterComponentId != null) {
        int componentId = lookupMetadataString(KeyType.COMPONENT_STRING, filterComponentId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == componentId) {
            // string does not exist in database
            return;
        }
        startComponentId = componentId;
        endComponentId = componentId;
    }
    int startExecutorId = 0;
    int endExecutorId = 0xFFFFFFFF;
    String filterExecutorName = filter.getExecutorId();
    if (filterExecutorName != null) {
        int executorId = lookupMetadataString(KeyType.EXEC_ID_STRING, filterExecutorName, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == executorId) {
            // string does not exist in database
            return;
        }
        startExecutorId = executorId;
        endExecutorId = executorId;
    }
    int startHostId = 0;
    int endHostId = 0xFFFFFFFF;
    String filterHostId = filter.getHostId();
    if (filterHostId != null) {
        int hostId = lookupMetadataString(KeyType.HOST_STRING, filterHostId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == hostId) {
            // string does not exist in database
            return;
        }
        startHostId = hostId;
        endHostId = hostId;
    }
    int startPort = 0;
    int endPort = 0xFFFFFFFF;
    Integer filterPort = filter.getPort();
    if (filterPort != null) {
        startPort = filterPort;
        endPort = filterPort;
    }
    int startStreamId = 0;
    int endStreamId = 0xFFFFFFFF;
    String filterStreamId = filter.getStreamId();
    if (filterStreamId != null) {
        int streamId = lookupMetadataString(KeyType.STREAM_ID_STRING, filterStreamId, stringToIdCache);
        if (INVALID_METADATA_STRING_ID == streamId) {
            // string does not exist in database
            return;
        }
        startStreamId = streamId;
        endStreamId = streamId;
    }
    try (ReadOptions ro = new ReadOptions()) {
        ro.setTotalOrderSeek(true);
        for (AggLevel aggLevel : filter.getAggLevels()) {
            RocksDbKey startKey = RocksDbKey.createMetricKey(aggLevel, startTopologyId, startTime, startMetricId, startComponentId, startExecutorId, startHostId, startPort, startStreamId);
            RocksDbKey endKey = RocksDbKey.createMetricKey(aggLevel, endTopologyId, endTime, endMetricId, endComponentId, endExecutorId, endHostId, endPort, endStreamId);
            try (RocksIterator iterator = db.newIterator(ro)) {
                for (iterator.seek(startKey.getRaw()); iterator.isValid(); iterator.next()) {
                    RocksDbKey key = new RocksDbKey(iterator.key());
                    if (key.compareTo(endKey) > 0) {
                        // past limit, quit
                        break;
                    }
                    if (startTopologyId != 0 && key.getTopologyId() != startTopologyId) {
                        continue;
                    }
                    long timestamp = key.getTimestamp();
                    if (timestamp < startTime || timestamp > endTime) {
                        continue;
                    }
                    if (startMetricId != 0 && key.getMetricId() != startMetricId) {
                        continue;
                    }
                    if (startComponentId != 0 && key.getComponentId() != startComponentId) {
                        continue;
                    }
                    if (startExecutorId != 0 && key.getExecutorId() != startExecutorId) {
                        continue;
                    }
                    if (startHostId != 0 && key.getHostnameId() != startHostId) {
                        continue;
                    }
                    if (startPort != 0 && key.getPort() != startPort) {
                        continue;
                    }
                    if (startStreamId != 0 && key.getStreamId() != startStreamId) {
                        continue;
                    }
                    RocksDbValue val = new RocksDbValue(iterator.value());
                    if (scanCallback != null) {
                        try {
                            // populate a metric
                            String metricName = metadataIdToString(KeyType.METRIC_STRING, key.getMetricId(), idToStringCache);
                            String topologyId = metadataIdToString(KeyType.TOPOLOGY_STRING, key.getTopologyId(), idToStringCache);
                            String componentId = metadataIdToString(KeyType.COMPONENT_STRING, key.getComponentId(), idToStringCache);
                            String executorId = metadataIdToString(KeyType.EXEC_ID_STRING, key.getExecutorId(), idToStringCache);
                            String hostname = metadataIdToString(KeyType.HOST_STRING, key.getHostnameId(), idToStringCache);
                            String streamId = metadataIdToString(KeyType.STREAM_ID_STRING, key.getStreamId(), idToStringCache);
                            Metric metric = new Metric(metricName, timestamp, topologyId, 0.0, componentId, executorId, hostname, streamId, key.getPort(), aggLevel);
                            val.populateMetric(metric);
                            // callback to caller
                            scanCallback.cb(metric);
                        } catch (MetricException e) {
                            LOG.warn("Failed to report found metric: {}", e.getMessage());
                        }
                    } else {
                        try {
                            if (!rawCallback.cb(key, val)) {
                                return;
                            }
                        } catch (RocksDBException e) {
                            throw new MetricException("Error reading metrics data", e);
                        }
                    }
                }
            }
        }
    }
}
Also used : RocksDBException(org.rocksdb.RocksDBException) HashMap(java.util.HashMap) RocksIterator(org.rocksdb.RocksIterator) MetricException(org.apache.storm.metricstore.MetricException) AggLevel(org.apache.storm.metricstore.AggLevel) ReadOptions(org.rocksdb.ReadOptions) Metric(org.apache.storm.metricstore.Metric)
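
A minimal standalone sketch of the bounded range scan scanInternal performs: seek to a start key, walk forward in key order with a RocksIterator, and stop once the end key is passed. Keys here are raw byte arrays; Storm's RocksDbKey packs the aggregation level, topology, timestamp, metric, and other ids into the key instead.

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksIterator;

class RangeScan {
    interface KvCallback {
        boolean accept(byte[] key, byte[] value);    // return false to stop the scan early
    }

    static void scan(RocksDB db, byte[] startKey, byte[] endKey, KvCallback cb) {
        try (ReadOptions ro = new ReadOptions()) {
            ro.setTotalOrderSeek(true);              // scan in full key order, ignoring prefix extractors
            try (RocksIterator it = db.newIterator(ro)) {
                for (it.seek(startKey); it.isValid(); it.next()) {
                    byte[] key = it.key();
                    if (compareUnsigned(key, endKey) > 0) {
                        break;                       // past the end of the requested range
                    }
                    if (!cb.accept(key, it.value())) {
                        return;                      // caller asked to stop early
                    }
                }
            }
        }
    }

    // Lexicographic comparison of unsigned bytes, matching RocksDB's default key ordering.
    private static int compareUnsigned(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int cmp = Integer.compare(a[i] & 0xFF, b[i] & 0xFF);
            if (cmp != 0) {
                return cmp;
            }
        }
        return Integer.compare(a.length, b.length);
    }
}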

Example 50 with RocksDBException

Use of org.rocksdb.RocksDBException in project storm by apache.

The class RocksDbMetricsWriter, method storeMetadataString.

// converts a metadata string into a unique integer.  Updates the timestamp of the string
// so we can track when it was last used for later deletion on database cleanup.
private int storeMetadataString(KeyType type, String s, long metricTimestamp) throws MetricException {
    if (s == null) {
        throw new MetricException("No string for metric metadata string type " + type);
    }
    // attempt to find it in the string cache
    StringMetadata stringMetadata = stringMetadataCache.get(s);
    if (stringMetadata != null) {
        // make sure the timestamp on the metadata has the latest time
        stringMetadata.update(metricTimestamp, type);
        return stringMetadata.getStringId();
    }
    // attempt to find the string in the database
    try {
        stringMetadata = store.rocksDbGetStringMetadata(type, s);
    } catch (RocksDBException e) {
        throw new MetricException("Error reading metrics data", e);
    }
    if (stringMetadata != null) {
        // update to the latest timestamp and add to the string cache
        stringMetadata.update(metricTimestamp, type);
        stringMetadataCache.put(s, stringMetadata, false);
        return stringMetadata.getStringId();
    }
    // string does not exist, create using an unique string id and add to cache
    if (LOG.isDebugEnabled()) {
        LOG.debug(type + "." + s + " does not exist in cache or database");
    }
    int stringId = getUniqueMetadataStringId();
    stringMetadata = new StringMetadata(type, stringId, metricTimestamp);
    stringMetadataCache.put(s, stringMetadata, true);
    return stringMetadata.getStringId();
}
Also used : RocksDBException(org.rocksdb.RocksDBException) MetricException(org.apache.storm.metricstore.MetricException)
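
A minimal standalone sketch of the get-or-create pattern above: consult a cache, fall back to the database, and allocate a new id only when the string is genuinely new. Unlike the Storm code, which leaves the database write to the cache and its dedicated writer thread, this sketch writes the new mapping eagerly; the cache, id counter, and key encoding are simplified placeholders.

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

class StringIdAllocator {
    private final RocksDB db;
    private final Map<String, Integer> cache = new HashMap<>();   // stands in for stringMetadataCache
    private final AtomicInteger nextId = new AtomicInteger(1);    // stands in for getUniqueMetadataStringId()

    StringIdAllocator(RocksDB db) {
        this.db = db;
    }

    int getOrCreate(String s) throws RocksDBException {
        Integer id = cache.get(s);
        if (id != null) {
            return id;                                            // 1. cache hit
        }
        byte[] key = s.getBytes(StandardCharsets.UTF_8);          // placeholder key encoding
        byte[] value = db.get(key);
        if (value != null) {
            id = Integer.parseInt(new String(value, StandardCharsets.UTF_8));
            cache.put(s, id);                                     // 2. found in DB: cache it, no write needed
            return id;
        }
        id = nextId.getAndIncrement();                            // 3. new string: allocate an id
        try (WriteOptions opts = new WriteOptions()) {
            db.put(opts, key, Integer.toString(id).getBytes(StandardCharsets.UTF_8));
        }
        cache.put(s, id);
        return id;
    }
}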

Aggregations

RocksDBException (org.rocksdb.RocksDBException): 66
IOException (java.io.IOException): 17
ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle): 17
ProcessorStateException (org.apache.kafka.streams.errors.ProcessorStateException): 11
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 11
ArrayList (java.util.ArrayList): 10
WriteBatch (org.rocksdb.WriteBatch): 10
HashMap (java.util.HashMap): 8
Map (java.util.Map): 8
MetricException (org.apache.storm.metricstore.MetricException): 8
WriteOptions (org.rocksdb.WriteOptions): 7
Options (org.rocksdb.Options): 6
File (java.io.File): 5
DBOptions (org.rocksdb.DBOptions): 5
FlushOptions (org.rocksdb.FlushOptions): 5
RocksDB (org.rocksdb.RocksDB): 5
DataInputViewStreamWrapper (org.apache.flink.core.memory.DataInputViewStreamWrapper): 4
ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions): 4
ReadOptions (org.rocksdb.ReadOptions): 4
RocksIterator (org.rocksdb.RocksIterator): 4