
Example 6 with RocksDBException

Use of org.rocksdb.RocksDBException in project voldemort by voldemort.

From the class RocksDbStorageEngine, method getValueForKey.

private List<Versioned<byte[]>> getValueForKey(ByteArray key, byte[] transforms) throws PersistenceFailureException {
    long startTimeNs = -1;
    if (logger.isTraceEnabled())
        startTimeNs = System.nanoTime();
    List<Versioned<byte[]>> value = null;
    try {
        byte[] result = getRocksDB().get(storeHandle, key.get());
        if (result != null) {
            value = StoreBinaryFormat.fromByteArray(result);
        } else {
            return Collections.emptyList();
        }
    } catch (RocksDBException e) {
        logger.error(e);
        throw new PersistenceFailureException(e);
    } finally {
        if (logger.isTraceEnabled()) {
            logger.trace("Completed GET (" + getName() + ") from key " + key + " (keyRef: " + System.identityHashCode(key) + ") in " + (System.nanoTime() - startTimeNs) + " ns at " + System.currentTimeMillis());
        }
    }
    return value;
}
Also used: RocksDBException (org.rocksdb.RocksDBException), Versioned (voldemort.versioning.Versioned), PersistenceFailureException (voldemort.store.PersistenceFailureException)
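
For readers unfamiliar with the RocksJava API used above, here is a minimal, self-contained sketch of the same get-and-wrap pattern outside Voldemort. The StoreReadException class and the /tmp path are illustrative only, and it assumes a recent RocksJava release where Options and RocksDB are AutoCloseable.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class RocksDbGetSketch {

    // Illustrative stand-in for Voldemort's PersistenceFailureException.
    static class StoreReadException extends RuntimeException {
        StoreReadException(Throwable cause) {
            super(cause);
        }
    }

    // Returns the stored value, or null when the key is absent, wrapping the
    // checked RocksDBException in an unchecked store-level exception.
    static byte[] getOrNull(RocksDB db, byte[] key) {
        try {
            return db.get(key);
        } catch (RocksDBException e) {
            throw new StoreReadException(e);
        }
    }

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-get-sketch")) {
            db.put("key".getBytes(), "value".getBytes());
            System.out.println(new String(getOrNull(db, "key".getBytes())));
        }
    }
}

As in getValueForKey above, a null result from RocksDB simply means the key is absent and is mapped to an empty result rather than an error.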

Example 7 with RocksDBException

Use of org.rocksdb.RocksDBException in project voldemort by voldemort.

From the class RocksDbStorageEngine, method delete.

@Override
public boolean delete(ByteArray key, Version version) throws PersistenceFailureException {
    StoreUtils.assertValidKey(key);
    long startTimeNs = -1;
    if (logger.isTraceEnabled())
        startTimeNs = System.nanoTime();
    synchronized (this.locks.lockFor(key.get())) {
        try {
            byte[] value = getRocksDB().get(storeHandle, key.get());
            if (value == null) {
                return false;
            }
            if (version == null) {
                // unversioned delete. Just blow away the whole thing
                getRocksDB().remove(storeHandle, key.get());
                return true;
            } else {
                // versioned deletes; need to determine what to delete
                List<Versioned<byte[]>> vals = StoreBinaryFormat.fromByteArray(value);
                Iterator<Versioned<byte[]>> iter = vals.iterator();
                int numVersions = vals.size();
                int numDeletedVersions = 0;
                // remove every stored version that happened strictly before the supplied version
                while (iter.hasNext()) {
                    Versioned<byte[]> curr = iter.next();
                    Version currentVersion = curr.getVersion();
                    if (currentVersion.compare(version) == Occurred.BEFORE) {
                        iter.remove();
                        numDeletedVersions++;
                    }
                }
                if (numDeletedVersions < numVersions) {
                    // we still have some valid versions
                    value = StoreBinaryFormat.toByteArray(vals);
                    getRocksDB().put(storeHandle, key.get(), value);
                } else {
                    // we have deleted all the versions, so remove the entry from the database
                    getRocksDB().remove(storeHandle, key.get());
                }
                return numDeletedVersions > 0;
            }
        } catch (RocksDBException e) {
            logger.error(e);
            throw new PersistenceFailureException(e);
        } finally {
            if (logger.isTraceEnabled()) {
                logger.trace("Completed DELETE (" + getName() + ") of key " + ByteUtils.toHexString(key.get()) + " (keyRef: " + System.identityHashCode(key) + ") in " + (System.nanoTime() - startTimeNs) + " ns at " + System.currentTimeMillis());
            }
        }
    }
}
Also used: RocksDBException (org.rocksdb.RocksDBException), Versioned (voldemort.versioning.Versioned), Version (voldemort.versioning.Version), PersistenceFailureException (voldemort.store.PersistenceFailureException)
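
The core of the versioned branch above is: delete only the stored versions that happened strictly before the supplied version, rewrite the entry if any survive, and drop the entry entirely otherwise. The sketch below restates that rule with a hypothetical scalar Clock (Java 16+ record, for brevity) in place of Voldemort's vector clocks and Occurred comparison, purely for illustration.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class VersionedDeleteSketch {

    // Hypothetical scalar clock; Voldemort actually compares vector clocks
    // via Version.compare(...) returning Occurred.BEFORE/AFTER/CONCURRENTLY.
    record Clock(long timestamp) {
        boolean before(Clock other) {
            return timestamp < other.timestamp;
        }
    }

    // Removes every stored version that happened strictly before the supplied
    // version and reports whether anything was deleted, mirroring the return
    // value of delete() above.
    static boolean deleteVersionsBefore(List<Clock> stored, Clock supplied) {
        int deleted = 0;
        for (Iterator<Clock> it = stored.iterator(); it.hasNext(); ) {
            if (it.next().before(supplied)) {
                it.remove();
                deleted++;
            }
        }
        return deleted > 0;
    }

    public static void main(String[] args) {
        List<Clock> stored = new ArrayList<>(List.of(new Clock(1), new Clock(5)));
        System.out.println(deleteVersionsBefore(stored, new Clock(3))); // true; Clock(5) survives
    }
}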

Example 8 with RocksDBException

Use of org.rocksdb.RocksDBException in project voldemort by voldemort.

From the class RocksDbStorageEngine, method multiVersionPut.

@Override
public List<Versioned<byte[]>> multiVersionPut(ByteArray key, List<Versioned<byte[]>> values) {
    // TODO Implement getAndLock() and putAndUnlock() and then remove this method
    StoreUtils.assertValidKey(key);
    long startTimeNs = -1;
    if (logger.isTraceEnabled())
        startTimeNs = System.nanoTime();
    List<Versioned<byte[]>> currentValues = null;
    List<Versioned<byte[]>> obsoleteVals = null;
    synchronized (this.locks.lockFor(key.get())) {
        /*
         * Get the existing values. Make sure to "get" from the underlying
         * storage instead of using the get method described in this class.
         * Invoking the get method from this class will unnecessarily double
         * prefix the key in case of PartitionPrefixedRocksdbStorageEngine
         * and can cause unpredictable results.
         */
        try {
            byte[] result = getRocksDB().get(storeHandle, key.get());
            if (result != null) {
                currentValues = StoreBinaryFormat.fromByteArray(result);
            } else {
                currentValues = new ArrayList<Versioned<byte[]>>();
            }
        } catch (RocksDBException e) {
            logger.error(e);
            throw new PersistenceFailureException(e);
        }
        obsoleteVals = resolveAndConstructVersionsToPersist(currentValues, values);
        try {
            getRocksDB().put(storeHandle, key.get(), StoreBinaryFormat.toByteArray(currentValues));
        } catch (RocksDBException e) {
            logger.error(e);
            throw new PersistenceFailureException(e);
        } finally {
            if (logger.isTraceEnabled()) {
                String valueStr = "";
                for (Versioned<byte[]> val : currentValues) {
                    valueStr += val + ",";
                }
                logger.trace("Completed PUT (" + getName() + ") to key " + key + " (keyRef: " + System.identityHashCode(key) + " values " + valueStr + " in " + (System.nanoTime() - startTimeNs) + " ns at " + System.currentTimeMillis());
            }
        }
    }
    return obsoleteVals;
}
Also used: RocksDBException (org.rocksdb.RocksDBException), Versioned (voldemort.versioning.Versioned), PersistenceFailureException (voldemort.store.PersistenceFailureException)
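
multiVersionPut is a read-modify-write cycle, so it runs inside synchronized (this.locks.lockFor(key.get())): keys hash onto a fixed pool of lock objects, serializing concurrent writers of the same key without one global lock. The class below is an illustrative sketch of such a striped-lock pool; the class and method names are not Voldemort's.

import java.util.Arrays;

public class StripedLocks {

    private final Object[] locks;

    public StripedLocks(int stripes) {
        locks = new Object[stripes];
        Arrays.setAll(locks, i -> new Object());
    }

    // The same key bytes always hash to the same lock object, so two threads
    // updating one key serialize on it while updates to other keys proceed.
    public Object lockFor(byte[] key) {
        return locks[(Arrays.hashCode(key) & 0x7fffffff) % locks.length];
    }
}

A caller would then wrap the get, resolve, put sequence in synchronized (locks.lockFor(key)) { ... }, exactly as the method above does.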

Example 9 with RocksDBException

Use of org.rocksdb.RocksDBException in project flink by apache.

From the class RocksDBAggregatingState, method get.

@Override
public R get() throws IOException {
    try {
        // prepare the current key and namespace for RocksDB lookup
        writeCurrentKeyWithGroupAndNamespace();
        final byte[] key = keySerializationStream.toByteArray();
        // get the current value
        final byte[] valueBytes = backend.db.get(columnFamily, key);
        if (valueBytes == null) {
            return null;
        }
        ACC accumulator = valueSerializer.deserialize(new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
        return aggFunction.getResult(accumulator);
    } catch (IOException | RocksDBException e) {
        throw new IOException("Error while retrieving value from RocksDB", e);
    }
}
Also used: RocksDBException (org.rocksdb.RocksDBException), ByteArrayInputStreamWithPos (org.apache.flink.core.memory.ByteArrayInputStreamWithPos), IOException (java.io.IOException), DataInputViewStreamWrapper (org.apache.flink.core.memory.DataInputViewStreamWrapper)

Example 10 with RocksDBException

Use of org.rocksdb.RocksDBException in project flink by apache.

From the class RocksDBAggregatingState, method add.

@Override
public void add(T value) throws IOException {
    try {
        // prepare the current key and namespace for RocksDB lookup
        writeCurrentKeyWithGroupAndNamespace();
        final byte[] key = keySerializationStream.toByteArray();
        keySerializationStream.reset();
        // get the current value
        final byte[] valueBytes = backend.db.get(columnFamily, key);
        // deserialize the current accumulator, or create a blank one
        final ACC accumulator = valueBytes == null ? aggFunction.createAccumulator() : valueSerializer.deserialize(new DataInputViewStreamWrapper(new ByteArrayInputStreamWithPos(valueBytes)));
        // aggregate the value into the accumulator
        aggFunction.add(value, accumulator);
        // serialize the new accumulator
        final DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(keySerializationStream);
        valueSerializer.serialize(accumulator, out);
        // write the new value to RocksDB
        backend.db.put(columnFamily, writeOptions, key, keySerializationStream.toByteArray());
    } catch (IOException | RocksDBException e) {
        throw new IOException("Error while adding value to RocksDB", e);
    }
}
Also used: RocksDBException (org.rocksdb.RocksDBException), DataOutputViewStreamWrapper (org.apache.flink.core.memory.DataOutputViewStreamWrapper), ByteArrayInputStreamWithPos (org.apache.flink.core.memory.ByteArrayInputStreamWithPos), IOException (java.io.IOException), DataInputViewStreamWrapper (org.apache.flink.core.memory.DataInputViewStreamWrapper)
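
Stripped of Flink's serializers and key/namespace handling, add() is a read-modify-write of a serialized accumulator in RocksDB: read the current bytes, fold the new value in, write the bytes back. The sketch below shows that shape with a plain long counter; the method name and the counter encoding are illustrative, not Flink's.

import java.nio.ByteBuffer;

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

public class RocksDbAccumulatorSketch {

    // Reads the current accumulator bytes, folds the new value in, and writes
    // the updated bytes back, treating a missing value as a blank accumulator
    // (the moral equivalent of createAccumulator() above).
    static void addToCounter(RocksDB db, ColumnFamilyHandle cf, WriteOptions writeOptions,
                             byte[] key, long delta) throws RocksDBException {
        byte[] current = db.get(cf, key);
        long accumulator = (current == null) ? 0L : ByteBuffer.wrap(current).getLong();
        accumulator += delta;
        db.put(cf, writeOptions, key, ByteBuffer.allocate(Long.BYTES).putLong(accumulator).array());
    }
}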

Aggregations

RocksDBException (org.rocksdb.RocksDBException): 16
IOException (java.io.IOException): 8
DataInputViewStreamWrapper (org.apache.flink.core.memory.DataInputViewStreamWrapper): 6
PersistenceFailureException (voldemort.store.PersistenceFailureException): 4
Versioned (voldemort.versioning.Versioned): 4
ByteArrayInputStream (java.io.ByteArrayInputStream): 3
ByteArrayInputStreamWithPos (org.apache.flink.core.memory.ByteArrayInputStreamWithPos): 3
ColumnFamilyDescriptor (org.rocksdb.ColumnFamilyDescriptor): 3
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
ProcessorStateException (org.apache.kafka.streams.errors.ProcessorStateException): 2
ColumnFamilyHandle (org.rocksdb.ColumnFamilyHandle): 2
Map (java.util.Map): 1
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 1
DataOutputViewStreamWrapper (org.apache.flink.core.memory.DataOutputViewStreamWrapper): 1
RegisteredBackendStateMetaInfo (org.apache.flink.runtime.state.RegisteredBackendStateMetaInfo): 1
SamzaException (org.apache.samza.SamzaException): 1
Config (org.apache.samza.config.Config): 1
MapConfig (org.apache.samza.config.MapConfig): 1
IntegerSerdeFactory (org.apache.samza.serializers.IntegerSerdeFactory): 1