
Example 1 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From class HBaseStoreManager, method convertToCommands.

/**
 * Convert JanusGraph internal Mutation representation into HBase native commands.
 *
 * @param mutations    Mutations to convert into HBase commands.
 * @param putTimestamp The timestamp to use for Put commands.
 * @param delTimestamp The timestamp to use for Delete commands.
 * @return Commands sorted by key converted from JanusGraph internal representation.
 * @throws org.janusgraph.diskstorage.PermanentBackendException
 */
@VisibleForTesting
Map<StaticBuffer, Pair<List<Put>, Delete>> convertToCommands(Map<String, Map<StaticBuffer, KCVMutation>> mutations, final long putTimestamp, final long delTimestamp) throws PermanentBackendException {
    // A map of rowkey to commands (list of Puts, Delete)
    final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerKey = new HashMap<>();
    for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> entry : mutations.entrySet()) {
        String cfString = getCfNameForStoreName(entry.getKey());
        byte[] cfName = Bytes.toBytes(cfString);
        for (Map.Entry<StaticBuffer, KCVMutation> m : entry.getValue().entrySet()) {
            final byte[] key = m.getKey().as(StaticBuffer.ARRAY_FACTORY);
            KCVMutation mutation = m.getValue();
            Pair<List<Put>, Delete> commands = commandsPerKey.get(m.getKey());
            // create the holder for a particular rowkey
            if (commands == null) {
                commands = new Pair<>();
                // List of all the Puts for this rowkey, both with and without TTL.
                final List<Put> putList = new ArrayList<>();
                commands.setFirst(putList);
                commandsPerKey.put(m.getKey(), commands);
            }
            if (mutation.hasDeletions()) {
                if (commands.getSecond() == null) {
                    Delete d = new Delete(key);
                    compat.setTimestamp(d, delTimestamp);
                    commands.setSecond(d);
                }
                for (StaticBuffer b : mutation.getDeletions()) {
                    // commands.getSecond() is a Delete for this rowkey.
                    commands.getSecond().deleteColumns(cfName, b.as(StaticBuffer.ARRAY_FACTORY), delTimestamp);
                }
            }
            if (mutation.hasAdditions()) {
                // All the entries (column cells) for this rowkey share this one Put,
                // except the ones that have TTL set.
                final Put putColumnsWithoutTtl = new Put(key, putTimestamp);
                for (Entry e : mutation.getAdditions()) {
                    // Deal with TTL within the entry (column cell) first
                    // HBase cell level TTL is actually set at the Mutation/Put level.
                    // Therefore we need to construct a new Put for each entry (column cell) with TTL.
                    // We cannot combine them because column cells within the same rowkey may:
                    // 1. have no TTL
                    // 2. have TTL
                    // 3. have different TTL
                    final Integer ttl = (Integer) e.getMetaData().get(EntryMetaData.TTL);
                    if (null != ttl && ttl > 0) {
                        // Create a new Put
                        Put putColumnWithTtl = new Put(key, putTimestamp);
                        addColumnToPut(putColumnWithTtl, cfName, putTimestamp, e);
                        // Convert ttl from seconds (JanusGraph TTL) to milliseconds (HBase TTL),
                        // using long arithmetic to avoid int overflow for very large TTLs.
                        // @see JanusGraphManagement#setTTL(JanusGraphSchemaType, Duration)
                        // Cast Put to Mutation for backward compatibility with HBase 0.98.x;
                        // HBase supports cell-level TTL for versions 0.98.6 and above.
                        ((Mutation) putColumnWithTtl).setTTL(1000L * ttl);
                        // commands.getFirst() is the list of Puts for this rowkey. Add this
                        // Put column with TTL to the list.
                        commands.getFirst().add(putColumnWithTtl);
                    } else {
                        addColumnToPut(putColumnsWithoutTtl, cfName, putTimestamp, e);
                    }
                }
                // If there were any mutations without TTL set, add them to commands.getFirst()
                if (!putColumnsWithoutTtl.isEmpty()) {
                    commands.getFirst().add(putColumnsWithoutTtl);
                }
            }
        }
    }
    return commandsPerKey;
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Put (org.apache.hadoop.hbase.client.Put), Mutation (org.apache.hadoop.hbase.client.Mutation), Pair (org.apache.hadoop.hbase.util.Pair), StaticBuffer (org.janusgraph.diskstorage.StaticBuffer), Entry (org.janusgraph.diskstorage.Entry), KCVMutation (org.janusgraph.diskstorage.keycolumnvalue.KCVMutation), HashMap (java.util.HashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap), ArrayList (java.util.ArrayList), LinkedList (java.util.LinkedList), List (java.util.List), Map (java.util.Map), BiMap (com.google.common.collect.BiMap), ImmutableBiMap (com.google.common.collect.ImmutableBiMap), ImmutableMap (com.google.common.collect.ImmutableMap), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
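
For orientation, here is a minimal test-style sketch of calling convertToCommands. This is hedged: the store name "edgestore", the byte values, and the manager variable (an open HBaseStoreManager) are illustrative assumptions, not part of the example above. Imports are as in the list above, plus java.util.Collections, org.janusgraph.diskstorage.util.StaticArrayBuffer, and org.janusgraph.diskstorage.util.StaticArrayEntry.

// Inside a test method that declares 'throws BackendException':
// build a one-row, one-column addition and convert it to HBase commands.
StaticBuffer rowKey = StaticArrayBuffer.of(new byte[] { 1, 2, 3 });
Entry addition = StaticArrayEntry.of(
    StaticArrayBuffer.of(new byte[] { 10 }),   // column
    StaticArrayBuffer.of(new byte[] { 20 }));  // value
KCVMutation mutation = new KCVMutation(
    Collections.singletonList(addition), Collections.emptyList());
long now = System.currentTimeMillis();
Map<StaticBuffer, Pair<List<Put>, Delete>> commands = manager.convertToCommands(
    Collections.singletonMap("edgestore", Collections.singletonMap(rowKey, mutation)),
    now, now);
// Expect one entry keyed by rowKey: a single Put (no TTL) and no Delete.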

Example 2 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From class AstyanaxKeyColumnValueStore, method getNamesSlice.

public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
    /*
         * RowQuery<K,C> should be parametrized as
         * RowQuery<ByteBuffer,ByteBuffer>. However, this causes the following
         * compilation error when attempting to call withColumnRange on a
         * RowQuery<ByteBuffer,ByteBuffer> instance:
         *
         * java.lang.Error: Unresolved compilation problem: The method
         * withColumnRange(ByteBuffer, ByteBuffer, boolean, int) is ambiguous
         * for the type RowQuery<ByteBuffer,ByteBuffer>
         *
         * The compiler substitutes ByteBuffer=C for both startColumn and
         * endColumn, compares it to its identical twin with that type
         * hard-coded, and dies.
         *
         */
    // Add one for last column potentially removed in CassandraHelper.makeEntryList
    final int queryLimit = query.getLimit() + (query.hasLimit() ? 1 : 0);
    final int pageLimit = Math.min(this.readPageSize, queryLimit);
    ByteBuffer sliceStart = query.getSliceStart().asByteBuffer();
    final ByteBuffer sliceEnd = query.getSliceEnd().asByteBuffer();
    final RowSliceQuery rq = keyspace.prepareQuery(columnFamily).setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()).withRetryPolicy(retryPolicy.duplicate()).getKeySlice(CassandraHelper.convert(keys));
    // Don't directly chain due to ambiguity resolution; see top comment
    rq.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
    final OperationResult<Rows<ByteBuffer, ByteBuffer>> r;
    try {
        r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    final Rows<ByteBuffer, ByteBuffer> rows = r.getResult();
    final Map<StaticBuffer, EntryList> result = new HashMap<>(rows.size());
    for (Row<ByteBuffer, ByteBuffer> row : rows) {
        assert !result.containsKey(row.getKey());
        final ByteBuffer key = row.getKey();
        ColumnList<ByteBuffer> pageColumns = row.getColumns();
        final List<Column<ByteBuffer>> queryColumns = new ArrayList<>();
        Iterables.addAll(queryColumns, pageColumns);
        while (pageColumns.size() == pageLimit && queryColumns.size() < queryLimit) {
            final Column<ByteBuffer> lastColumn = queryColumns.get(queryColumns.size() - 1);
            sliceStart = lastColumn.getName();
            // No possibility of two values at the same column name, so start the
            // next slice just after the last column returned by the previous query:
            // increment the last byte that can still be incremented (column names
            // compare lexicographically, so this yields the next candidate name).
            Integer position = null;
            for (int i = sliceStart.array().length - 1; i >= 0; i--) {
                if (sliceStart.array()[i] < Byte.MAX_VALUE) {
                    position = i;
                    sliceStart.array()[i]++;
                    break;
                }
            }
            if (null == position) {
                throw new PermanentBackendException("Column was not incrementable");
            }
            final RowQuery pageQuery = keyspace.prepareQuery(columnFamily).setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax()).withRetryPolicy(retryPolicy.duplicate()).getKey(row.getKey());
            // Don't directly chain due to ambiguity resolution; see top comment
            pageQuery.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
            final OperationResult<ColumnList<ByteBuffer>> pageResult;
            try {
                pageResult = (OperationResult<ColumnList<ByteBuffer>>) pageQuery.execute();
            } catch (ConnectionException e) {
                throw new TemporaryBackendException(e);
            }
            if (Thread.interrupted()) {
                throw new TraversalInterruptedException();
            }
            // Reset the incremented position to avoid leaking mutations up the
            // stack to callers - sliceStart.array() in fact refers to a column name
            // that will be later read to deserialize an edge (since we assigned it
            // via de-referencing a column from the previous query).
            sliceStart.array()[position]--;
            pageColumns = pageResult.getResult();
            Iterables.addAll(queryColumns, pageColumns);
        }
        result.put(StaticArrayBuffer.of(key), CassandraHelper.makeEntryList(queryColumns, entryGetter, query.getSliceEnd(), query.getLimit()));
    }
    return result;
}
Also used: StaticBuffer (org.janusgraph.diskstorage.StaticBuffer), EntryList (org.janusgraph.diskstorage.EntryList), PermanentBackendException (org.janusgraph.diskstorage.PermanentBackendException), TemporaryBackendException (org.janusgraph.diskstorage.TemporaryBackendException), TraversalInterruptedException (org.apache.tinkerpop.gremlin.process.traversal.util.TraversalInterruptedException), ByteBuffer (java.nio.ByteBuffer), Column (com.netflix.astyanax.model.Column), ColumnList (com.netflix.astyanax.model.ColumnList), Rows (com.netflix.astyanax.model.Rows), RowQuery (com.netflix.astyanax.query.RowQuery), RowSliceQuery (com.netflix.astyanax.query.RowSliceQuery), ConnectionException (com.netflix.astyanax.connectionpool.exceptions.ConnectionException)
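
The increment-and-restore trick inside the paging loop is subtle, so here is the same mechanics as a standalone sketch. Hedged: the helper name bumpLastIncrementableByte and the byte values are illustrative, not JanusGraph API.

// Increment the last byte still below Byte.MAX_VALUE so the next inclusive
// slice start is strictly greater than the last column already consumed.
// The mutation is in place, so the caller must undo it afterwards.
static int bumpLastIncrementableByte(byte[] columnName) {
    for (int i = columnName.length - 1; i >= 0; i--) {
        if (columnName[i] < Byte.MAX_VALUE) {
            columnName[i]++;
            return i;  // position to decrement when restoring
        }
    }
    throw new IllegalStateException("Column was not incrementable");
}

// Usage:
byte[] lastSeen = { 0x01, 0x7e };
int pos = bumpLastIncrementableByte(lastSeen);  // lastSeen is now { 0x01, 0x7f }
// ... run the next page query with lastSeen as the slice start ...
lastSeen[pos]--;  // restore: the array also backs a column name read later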

Example 3 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From class AstyanaxStoreManager, method mutateMany.

@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> batch, StoreTransaction txh) throws BackendException {
    MutationBatch m = keyspaceContext.getClient().prepareMutationBatch().withAtomicBatch(atomicBatch).setConsistencyLevel(getTx(txh).getWriteConsistencyLevel().getAstyanax()).withRetryPolicy(retryPolicy.duplicate());
    final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
    for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> batchentry : batch.entrySet()) {
        String storeName = batchentry.getKey();
        Preconditions.checkArgument(openStores.containsKey(storeName), "Store cannot be found: %s", storeName);
        ColumnFamily<ByteBuffer, ByteBuffer> columnFamily = openStores.get(storeName).getColumnFamily();
        Map<StaticBuffer, KCVMutation> mutations = batchentry.getValue();
        for (Map.Entry<StaticBuffer, KCVMutation> ent : mutations.entrySet()) {
            // The ColumnListMutations (CLMs) for additions and deletions are
            // separated because Astyanax's operation timestamp cannot be set on
            // a per-delete or per-addition basis.
            KCVMutation janusgraphMutation = ent.getValue();
            ByteBuffer key = ent.getKey().asByteBuffer();
            if (janusgraphMutation.hasDeletions()) {
                ColumnListMutation<ByteBuffer> deletions = m.withRow(columnFamily, key);
                deletions.setTimestamp(commitTime.getDeletionTime(times));
                for (StaticBuffer b : janusgraphMutation.getDeletions()) deletions.deleteColumn(b.as(StaticBuffer.BB_FACTORY));
            }
            if (janusgraphMutation.hasAdditions()) {
                ColumnListMutation<ByteBuffer> updates = m.withRow(columnFamily, key);
                updates.setTimestamp(commitTime.getAdditionTime(times));
                for (Entry e : janusgraphMutation.getAdditions()) {
                    Integer ttl = (Integer) e.getMetaData().get(EntryMetaData.TTL);
                    if (null != ttl && ttl > 0) {
                        updates.putColumn(e.getColumnAs(StaticBuffer.BB_FACTORY), e.getValueAs(StaticBuffer.BB_FACTORY), ttl);
                    } else {
                        updates.putColumn(e.getColumnAs(StaticBuffer.BB_FACTORY), e.getValueAs(StaticBuffer.BB_FACTORY));
                    }
                }
            }
        }
    }
    try {
        m.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    sleepAfterWrite(txh, commitTime);
}
Also used: ByteBuffer (java.nio.ByteBuffer), KCVMutation (org.janusgraph.diskstorage.keycolumnvalue.KCVMutation), Entry (org.janusgraph.diskstorage.Entry), TemporaryBackendException (org.janusgraph.diskstorage.TemporaryBackendException), StaticBuffer (org.janusgraph.diskstorage.StaticBuffer), MutationBatch (com.netflix.astyanax.MutationBatch), ConnectionException (com.netflix.astyanax.connectionpool.exceptions.ConnectionException), Map (java.util.Map), HashMap (java.util.HashMap), ImmutableMap (com.google.common.collect.ImmutableMap)
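
A hedged sketch of the batch shape mutateMany consumes, with both an addition and a deletion for the same row. Note the TTL contrast with Example 1: Cassandra TTLs are expressed in seconds, so the JanusGraph value is passed through unchanged, while the HBase path must multiply by 1000 to get milliseconds. The store name "edgestore", the byte values, and the manager/tx variables are assumptions. Imports are as in the list above, plus java.util.Collections, org.janusgraph.diskstorage.util.StaticArrayBuffer, and org.janusgraph.diskstorage.util.StaticArrayEntry.

// With an open AstyanaxStoreManager 'manager' and a StoreTransaction 'tx':
StaticBuffer row = StaticArrayBuffer.of(new byte[] { 1 });
Entry addition = StaticArrayEntry.of(
    StaticArrayBuffer.of(new byte[] { 2 }),   // column
    StaticArrayBuffer.of(new byte[] { 3 }));  // value
StaticBuffer staleColumn = StaticArrayBuffer.of(new byte[] { 4 });
KCVMutation mutation = new KCVMutation(
    Collections.singletonList(addition),      // written with the addition timestamp
    Collections.singletonList(staleColumn));  // written with the deletion timestamp
manager.mutateMany(
    Collections.singletonMap("edgestore",
        Collections.singletonMap(row, mutation)), tx);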

Example 4 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From class CassandraHelper, method transformRange.

public static KeyRange transformRange(Token leftKeyExclusive, Token rightKeyInclusive) {
    if (!(leftKeyExclusive instanceof BytesToken))
        throw new UnsupportedOperationException();
    // If the left token is a BytesToken, the right one must be too; otherwise the ring makes no sense.
    assert rightKeyInclusive instanceof BytesToken;
    // l is exclusive, r is inclusive
    BytesToken l = (BytesToken) leftKeyExclusive;
    BytesToken r = (BytesToken) rightKeyInclusive;
    byte[] leftTokenValue = l.getTokenValue();
    byte[] rightTokenValue = r.getTokenValue();
    Preconditions.checkArgument(leftTokenValue.length == rightTokenValue.length, "Tokens have unequal length");
    int tokenLength = leftTokenValue.length;
    byte[][] tokens = new byte[][] { leftTokenValue, rightTokenValue };
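    // Add one (with carry, most significant byte first) to both token values so the
    // (exclusive, inclusive] token interval maps onto KeyRange's start/end bounds.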
    byte[][] plusOne = new byte[2][tokenLength];
    for (int j = 0; j < 2; j++) {
        boolean carry = true;
        for (int i = tokenLength - 1; i >= 0; i--) {
            byte b = tokens[j][i];
            if (carry) {
                b++;
                carry = b == 0;
            }
            plusOne[j][i] = b;
        }
    }
    StaticBuffer lb = StaticArrayBuffer.of(plusOne[0]);
    StaticBuffer rb = StaticArrayBuffer.of(plusOne[1]);
    Preconditions.checkArgument(lb.length() == tokenLength, lb.length());
    Preconditions.checkArgument(rb.length() == tokenLength, rb.length());
    return new KeyRange(lb, rb);
}
Also used: KeyRange (org.janusgraph.diskstorage.keycolumnvalue.KeyRange), BytesToken (org.apache.cassandra.dht.BytesToken), StaticBuffer (org.janusgraph.diskstorage.StaticBuffer)
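
A tiny worked example of the carry loop above, with illustrative values: a trailing 0xFF byte wraps to 0x00 and carries into the next byte.

// Standalone demonstration of the plus-one-with-carry step in transformRange.
byte[] token = { 0x00, (byte) 0xFF };
byte[] plusOne = new byte[token.length];
boolean carry = true;
for (int i = token.length - 1; i >= 0; i--) {
    byte b = token[i];
    if (carry) {
        b++;             // (byte) 0xFF + 1 wraps to 0x00
        carry = b == 0;  // keep carrying only on wrap-around
    }
    plusOne[i] = b;
}
// plusOne is now { 0x01, 0x00 }: the value immediately after { 0x00, 0xFF }.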

Example 5 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From class BerkeleyJEStoreManager, method mutateMany.

@Override
public void mutateMany(Map<String, KVMutation> mutations, StoreTransaction txh) throws BackendException {
    for (Map.Entry<String, KVMutation> mutation : mutations.entrySet()) {
        BerkeleyJEKeyValueStore store = openDatabase(mutation.getKey());
        KVMutation mutationValue = mutation.getValue();
        if (!mutationValue.hasAdditions() && !mutationValue.hasDeletions()) {
            log.debug("Empty mutation set for {}, doing nothing", mutation.getKey());
        } else {
            log.debug("Mutating {}", mutation.getKey());
        }
        if (mutationValue.hasAdditions()) {
            for (KeyValueEntry entry : mutationValue.getAdditions()) {
                store.insert(entry.getKey(), entry.getValue(), txh, entry.getTtl());
                log.trace("Insertion on {}: {}", mutation.getKey(), entry);
            }
        }
        if (mutationValue.hasDeletions()) {
            for (StaticBuffer del : mutationValue.getDeletions()) {
                store.delete(del, txh);
                log.trace("Deletion on {}: {}", mutation.getKey(), del);
            }
        }
    }
}
Also used: KVMutation (org.janusgraph.diskstorage.keycolumnvalue.keyvalue.KVMutation), KeyValueEntry (org.janusgraph.diskstorage.keycolumnvalue.keyvalue.KeyValueEntry), StaticBuffer (org.janusgraph.diskstorage.StaticBuffer), HashMap (java.util.HashMap), Map (java.util.Map)
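
For contrast with the key-column-value examples above, a hedged sketch of the flat key-value batch this method consumes. The store name "testStore", the byte values, and the manager/tx variables are assumptions; KVMutation and KeyValueEntry are the key-value counterparts of KCVMutation and Entry. Imports are as in the list above, plus java.util.Collections and org.janusgraph.diskstorage.util.StaticArrayBuffer.

// With an open BerkeleyJEStoreManager 'manager' and a StoreTransaction 'tx':
StaticBuffer key = StaticArrayBuffer.of(new byte[] { 1 });
StaticBuffer value = StaticArrayBuffer.of(new byte[] { 2 });
KVMutation mutation = new KVMutation(
    Collections.singletonList(new KeyValueEntry(key, value)),  // additions
    Collections.emptyList());                                  // deletions
manager.mutateMany(Collections.singletonMap("testStore", mutation), tx);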

Aggregations

StaticBuffer (org.janusgraph.diskstorage.StaticBuffer): 101 usages
Entry (org.janusgraph.diskstorage.Entry): 36
Test (org.junit.jupiter.api.Test): 36
ArrayList (java.util.ArrayList): 27
HashMap (java.util.HashMap): 20
Map (java.util.Map): 19
StoreTransaction (org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction): 17
KeySliceQuery (org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery): 16
StaticArrayEntry (org.janusgraph.diskstorage.util.StaticArrayEntry): 16
BackendException (org.janusgraph.diskstorage.BackendException): 15
List (java.util.List): 14
EntryList (org.janusgraph.diskstorage.EntryList): 14
TemporaryBackendException (org.janusgraph.diskstorage.TemporaryBackendException): 14
KCVMutation (org.janusgraph.diskstorage.keycolumnvalue.KCVMutation): 13
PermanentBackendException (org.janusgraph.diskstorage.PermanentBackendException): 12
Instant (java.time.Instant): 11
DataOutput (org.janusgraph.graphdb.database.serialize.DataOutput): 10
ReadBuffer (org.janusgraph.diskstorage.ReadBuffer): 8
ConsistentKeyLockStatus (org.janusgraph.diskstorage.locking.consistentkey.ConsistentKeyLockStatus): 7
BackendOperation (org.janusgraph.diskstorage.util.BackendOperation): 7