
Example 6 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From the class VariableLongTest, the method byteOrderPreservingPositiveBackward:

@Test
public void byteOrderPreservingPositiveBackward() {
    long[] scalingFactors = { Long.MAX_VALUE, 1000, 1000000000L };
    for (int t = 0; t < 10000000; t++) {
        StaticBuffer[] b = new StaticBuffer[2];
        long[] l = new long[2];
        for (int i = 0; i < 2; i++) {
            l[i] = randomPosLong(scalingFactors[random.nextInt(scalingFactors.length)]);
            WriteBuffer out = new WriteByteBuffer(11);
            VariableLong.writePositiveBackward(out, l[i]);
            b[i] = out.getStaticBuffer();
            ReadBuffer res = b[i].asReadBuffer();
            res.movePositionTo(res.length());
            assertEquals(l[i], VariableLong.readPositiveBackward(res));
        }
        // System.out.println(l[0] + " vs " + l[1]);
        assertEquals(Math.signum(Long.compare(l[0], l[1])), Math.signum(b[0].compareTo(b[1])), 0.01);
    }
}
Also used : ReadBuffer(org.janusgraph.diskstorage.ReadBuffer) WriteBuffer(org.janusgraph.diskstorage.WriteBuffer) WriteByteBuffer(org.janusgraph.diskstorage.util.WriteByteBuffer) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) Test(org.junit.Test)
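
For context, a minimal standalone sketch of the property the test above exercises: writePositiveBackward encodes a positive long so that it is read back from the end of the buffer, and so that byte-order comparison of the resulting StaticBuffers matches numeric order. This assumes VariableLong lives in org.janusgraph.graphdb.database.idhandling as in the JanusGraph source; it is an illustration, not part of the test.

import org.janusgraph.diskstorage.ReadBuffer;
import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.WriteBuffer;
import org.janusgraph.diskstorage.util.WriteByteBuffer;
import org.janusgraph.graphdb.database.idhandling.VariableLong;

public class BackwardEncodingSketch {
    public static void main(String[] args) {
        long small = 42L;
        long large = 1_000_000L;

        WriteBuffer w1 = new WriteByteBuffer(11);
        VariableLong.writePositiveBackward(w1, small);
        StaticBuffer b1 = w1.getStaticBuffer();

        WriteBuffer w2 = new WriteByteBuffer(11);
        VariableLong.writePositiveBackward(w2, large);
        StaticBuffer b2 = w2.getStaticBuffer();

        // Backward-encoded values are read starting from the end of the buffer,
        // so the read position must first be moved to the buffer's length.
        ReadBuffer r1 = b1.asReadBuffer();
        r1.movePositionTo(r1.length());
        System.out.println(VariableLong.readPositiveBackward(r1)); // 42

        // Byte-order comparison of the buffers agrees with numeric order of the values.
        System.out.println(b1.compareTo(b2) < 0); // true, since 42 < 1000000
    }
}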

Example 7 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From the class SerializerTest, the method testSerializedOrder:

@Test
public void testSerializedOrder() {
    serialize.registerClass(1, TClass1.class, new TClass1Serializer());
    final Map<Class, Factory> sortTypes = new HashMap<>();
    for (Map.Entry<Class, Factory> entry : TYPES.entrySet()) {
        if (serialize.isOrderPreservingDatatype(entry.getKey()))
            sortTypes.put(entry.getKey(), entry.getValue());
    }
    assertEquals(10, sortTypes.size());
    for (int t = 0; t < 3000000; t++) {
        DataOutput o1 = serialize.getDataOutput(64);
        DataOutput o2 = serialize.getDataOutput(64);
        Map.Entry<Class, Factory> type = Iterables.get(sortTypes.entrySet(), random.nextInt(sortTypes.size()));
        Comparable c1 = (Comparable) type.getValue().newInstance();
        Comparable c2 = (Comparable) type.getValue().newInstance();
        o1.writeObjectByteOrder(c1, type.getKey());
        o2.writeObjectByteOrder(c2, type.getKey());
        StaticBuffer s1 = o1.getStaticBuffer();
        StaticBuffer s2 = o2.getStaticBuffer();
        assertEquals(Math.signum(c1.compareTo(c2)), Math.signum(s1.compareTo(s2)), 0.0);
        Object c1o = serialize.readObjectByteOrder(s1.asReadBuffer(), type.getKey());
        Object c2o = serialize.readObjectByteOrder(s2.asReadBuffer(), type.getKey());
        assertEquals(c1, c1o);
        assertEquals(c2, c2o);
    }
}
Also used : DataOutput(org.janusgraph.graphdb.database.serialize.DataOutput) LoggerFactory(org.slf4j.LoggerFactory) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) Test(org.junit.Test)
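
As a smaller illustration of the same round trip, the sketch below writes two Longs with writeObjectByteOrder and checks that the serialized StaticBuffers sort the same way the values do. It assumes StandardSerializer (org.janusgraph.graphdb.database.serialize.StandardSerializer) is the concrete Serializer behind the serialize field in the test, and that Long is one of the order-preserving datatypes; treat it as a sketch rather than part of the test suite.

import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.graphdb.database.serialize.DataOutput;
import org.janusgraph.graphdb.database.serialize.StandardSerializer;

public class ByteOrderSketch {
    public static void main(String[] args) {
        StandardSerializer serializer = new StandardSerializer();

        DataOutput o1 = serializer.getDataOutput(64);
        o1.writeObjectByteOrder(5L, Long.class);
        StaticBuffer s1 = o1.getStaticBuffer();

        DataOutput o2 = serializer.getDataOutput(64);
        o2.writeObjectByteOrder(500L, Long.class);
        StaticBuffer s2 = o2.getStaticBuffer();

        // The byte-order-preserving encoding makes buffer comparison match value comparison.
        System.out.println(s1.compareTo(s2) < 0); // expected: true

        // Values round-trip through readObjectByteOrder.
        Long back = serializer.readObjectByteOrder(s1.asReadBuffer(), Long.class);
        System.out.println(back); // expected: 5
    }
}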

Example 8 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From the class SerializerTest, the method parallelDeserialization:

@Test
public void parallelDeserialization() throws InterruptedException {
    serialize.registerClass(1, TClass2.class, new TClass2Serializer());
    final long value = 8;
    final String str = "123456";
    final TClass2 c = new TClass2("abcdefg", 333);
    DataOutput out = serialize.getDataOutput(128);
    out.putLong(value);
    out.writeClassAndObject(value);
    out.writeObject(c, TClass2.class);
    out.writeObjectNotNull(str);
    final StaticBuffer b = out.getStaticBuffer();
    int numThreads = 4;
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
        threads[i] = new Thread(() -> {
            for (int j = 0; j < 100000; j++) {
                ReadBuffer buffer = b.asReadBuffer();
                assertEquals(8, buffer.getLong());
                assertEquals(value, (long) serialize.readClassAndObject(buffer));
                assertEquals(c, serialize.readObject(buffer, TClass2.class));
                assertEquals(str, serialize.readObjectNotNull(buffer, String.class));
            }
        });
        threads[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].join();
    }
}
Also used : DataOutput(org.janusgraph.graphdb.database.serialize.DataOutput) ReadBuffer(org.janusgraph.diskstorage.ReadBuffer) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) Test(org.junit.Test)
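
The parallel test works because a StaticBuffer is immutable and each call to asReadBuffer() hands out an independent ReadBuffer with its own read position. A minimal sketch of that idea, using only classes that appear in the examples above (the buffer contents here are illustrative):

import org.janusgraph.diskstorage.ReadBuffer;
import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.WriteBuffer;
import org.janusgraph.diskstorage.util.WriteByteBuffer;

public class SharedBufferSketch {
    public static void main(String[] args) throws InterruptedException {
        WriteBuffer out = new WriteByteBuffer(16);
        out.putLong(8L);
        final StaticBuffer shared = out.getStaticBuffer();

        Runnable reader = () -> {
            // Each reader gets a fresh ReadBuffer, so there is no shared mutable position.
            ReadBuffer local = shared.asReadBuffer();
            if (local.getLong() != 8L) throw new AssertionError("unexpected value");
        };

        Thread t1 = new Thread(reader);
        Thread t2 = new Thread(reader);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}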

Example 9 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From the class HBaseStoreManager, the method convertToCommands:

/**
 * Convert JanusGraph internal Mutation representation into HBase native commands.
 *
 * @param mutations    Mutations to convert into HBase commands.
 * @param putTimestamp The timestamp to use for Put commands.
 * @param delTimestamp The timestamp to use for Delete commands.
 * @return HBase commands, grouped by row key, converted from the JanusGraph internal representation.
 * @throws org.janusgraph.diskstorage.PermanentBackendException
 */
@VisibleForTesting
Map<StaticBuffer, Pair<List<Put>, Delete>> convertToCommands(Map<String, Map<StaticBuffer, KCVMutation>> mutations, final long putTimestamp, final long delTimestamp) throws PermanentBackendException {
    // A map of rowkey to commands (list of Puts, Delete)
    final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerKey = new HashMap<>();
    for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> entry : mutations.entrySet()) {
        String cfString = getCfNameForStoreName(entry.getKey());
        byte[] cfName = Bytes.toBytes(cfString);
        for (Map.Entry<StaticBuffer, KCVMutation> m : entry.getValue().entrySet()) {
            final byte[] key = m.getKey().as(StaticBuffer.ARRAY_FACTORY);
            KCVMutation mutation = m.getValue();
            Pair<List<Put>, Delete> commands = commandsPerKey.get(m.getKey());
            // create the holder for a particular rowkey
            if (commands == null) {
                commands = new Pair<>();
                // List of all the Puts for this rowkey, including the ones without TTL and with TTL.
                final List<Put> putList = new ArrayList<>();
                commands.setFirst(putList);
                commandsPerKey.put(m.getKey(), commands);
            }
            if (mutation.hasDeletions()) {
                if (commands.getSecond() == null) {
                    Delete d = new Delete(key);
                    compat.setTimestamp(d, delTimestamp);
                    commands.setSecond(d);
                }
                for (StaticBuffer b : mutation.getDeletions()) {
                    // commands.getSecond() is a Delete for this rowkey.
                    commands.getSecond().deleteColumns(cfName, b.as(StaticBuffer.ARRAY_FACTORY), delTimestamp);
                }
            }
            if (mutation.hasAdditions()) {
                // All the entries (column cells) with the rowkey use this one Put, except the ones with TTL.
                final Put putColumnsWithoutTtl = new Put(key, putTimestamp);
                // At the end of the loop below, commands.getFirst() will contain one Put with all
                // additions that lack a TTL, plus a separate Put for each column cell
                // that has TTL set.
                for (Entry e : mutation.getAdditions()) {
                    // Deal with TTL within the entry (column cell) first
                    // HBase cell level TTL is actually set at the Mutation/Put level.
                    // Therefore we need to construct a new Put for each entry (column cell) with TTL.
                    // We can not combine them because column cells within the same rowkey may:
                    // 1. have no TTL
                    // 2. have TTL
                    // 3. have different TTL
                    final Integer ttl = (Integer) e.getMetaData().get(EntryMetaData.TTL);
                    if (null != ttl && ttl > 0) {
                        // Create a new Put
                        Put putColumnWithTtl = new Put(key, putTimestamp);
                        addColumnToPut(putColumnWithTtl, cfName, putTimestamp, e);
                        // Convert the TTL from seconds (JanusGraph TTL) to milliseconds (HBase TTL)
                        // @see JanusGraphManagement#setTTL(JanusGraphSchemaType, Duration)
                        // Cast Put to Mutation for backward compatibility with HBase 0.98.x
                        // HBase supports cell-level TTL for versions 0.98.6 and above.
                        ((Mutation) putColumnWithTtl).setTTL(ttl * 1000);
                        // commands.getFirst() is the list of Puts for this rowkey. Add this
                        // Put column with TTL to the list.
                        commands.getFirst().add(putColumnWithTtl);
                    } else {
                        addColumnToPut(putColumnsWithoutTtl, cfName, putTimestamp, e);
                    }
                }
                // If there were any mutations without TTL set, add them to commands.getFirst()
                if (!putColumnsWithoutTtl.isEmpty()) {
                    commands.getFirst().add(putColumnsWithoutTtl);
                }
            }
        }
    }
    return commandsPerKey;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) KCVMutation(org.janusgraph.diskstorage.keycolumnvalue.KCVMutation) Entry(org.janusgraph.diskstorage.Entry) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) Mutation(org.apache.hadoop.hbase.client.Mutation) KCVMutation(org.janusgraph.diskstorage.keycolumnvalue.KCVMutation) Map(java.util.Map) BiMap(com.google.common.collect.BiMap) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ImmutableBiMap(com.google.common.collect.ImmutableBiMap) Pair(org.apache.hadoop.hbase.util.Pair) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
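
A small sketch of the TTL branch above, using plain HBase client types: JanusGraph stores TTLs in seconds, while Mutation#setTTL expects milliseconds, hence the multiplication by 1000. The row key, column family, and column qualifier below are hypothetical placeholders.

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class TtlPutSketch {
    public static void main(String[] args) {
        byte[] rowKey = Bytes.toBytes("row1");   // hypothetical row key
        byte[] cf = Bytes.toBytes("e");          // hypothetical column family
        long putTimestamp = System.currentTimeMillis();
        int ttlSeconds = 3600;                   // e.g. a one-hour JanusGraph TTL

        Put put = new Put(rowKey, putTimestamp);
        put.addColumn(cf, Bytes.toBytes("col"), putTimestamp, Bytes.toBytes("val"));

        // Cell-level TTL is set on the Mutation; the cast mirrors the code above and
        // requires HBase 0.98.6 or newer. Seconds are converted to milliseconds.
        ((Mutation) put).setTTL(ttlSeconds * 1000L);
    }
}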

Example 10 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

From the class HBaseStoreManager, the method normalizeKeyBounds:

/**
 * Given a list of {@link HRegionLocation}s, such as that produced by
 * {@link HTable#getRegionLocations()}, transform each region's
 * {@link HRegionInfo} into a {@link KeyRange} expressing the region's
 * start and end key bounds using JanusGraph-partitioning-friendly
 * conventions (start inclusive, end exclusive, zero bytes appended where
 * necessary to make all keys at least 4 bytes long).
 * <p/>
 * This method iterates over the elements of its list parameter and performs
 * the following conditional conversions on each region's start and end keys.
 * "Require" below means either a {@link Preconditions} invocation or an
 * assertion. HRegionInfo sometimes returns start and end keys of zero length;
 * this method replaces zero-length keys with null before doing any of the
 * checks described below. The parameter list and the values it contains are
 * only read and never modified.
 *
 * <ul>
 * <li>If an entry's HRegionInfo has null start and end keys, then first
 * require that the parameter map is a singleton, and then return a
 * single-entry map whose {@code KeyRange} has start and end buffers that
 * are both four bytes of zeros.</li>
 * <li>If the entry has a null end key (but non-null start key), put an
 * equivalent entry in the result map with a start key identical to the
 * input, except that zeros are appended to values less than 4 bytes long,
 * and an end key that is four bytes of zeros.
 * <li>If the entry has a null start key (but non-null end key), put an
 * equivalent entry in the result map where the start key is four bytes of
 * zeros, and the end key has zeros appended, if necessary, to make it at
 * least 4 bytes long, after which one is added to the padded value in
 * unsigned 32-bit arithmetic with overflow allowed.</li>
 * <li>Any entry which matches none of the above criteria results in an
 * equivalent entry in the returned map, except that zeros are appended to
 * both keys to make each at least 4 bytes long, and the end key is then
 * incremented as described in the last bullet point.</li>
 * </ul>
 *
 * After iterating over the parameter list, this method checks that it either
 * saw no entries with null keys, one entry with a null start key and a
 * different entry with a null end key, or one entry with both start and end
 * keys null. If any null keys are observed besides these three cases, the
 * method will die with a precondition failure.
 *
 * @param locations A list of HRegionLocation entries describing the table's regions
 * @return JanusGraph-friendly expression of each region's rowkey boundaries
 */
private Map<KeyRange, ServerName> normalizeKeyBounds(List<HRegionLocation> locations) {
    HRegionLocation nullStart = null;
    HRegionLocation nullEnd = null;
    ImmutableMap.Builder<KeyRange, ServerName> b = ImmutableMap.builder();
    for (HRegionLocation location : locations) {
        HRegionInfo regionInfo = location.getRegionInfo();
        ServerName serverName = location.getServerName();
        byte[] startKey = regionInfo.getStartKey();
        byte[] endKey = regionInfo.getEndKey();
        if (0 == startKey.length) {
            startKey = null;
            logger.trace("Converted zero-length HBase startKey byte array to null");
        }
        if (0 == endKey.length) {
            endKey = null;
            logger.trace("Converted zero-length HBase endKey byte array to null");
        }
        if (null == startKey && null == endKey) {
            Preconditions.checkState(1 == locations.size());
            logger.debug("HBase table {} has a single region {}", tableName, regionInfo);
            // Choose arbitrary shared value = startKey = endKey
            return b.put(new KeyRange(FOUR_ZERO_BYTES, FOUR_ZERO_BYTES), serverName).build();
        } else if (null == startKey) {
            logger.debug("Found HRegionInfo with null startKey on server {}: {}", serverName, regionInfo);
            Preconditions.checkState(null == nullStart);
            nullStart = location;
            // I thought endBuf would be inclusive from the HBase javadoc, but in practice it is exclusive
            StaticBuffer endBuf = StaticArrayBuffer.of(zeroExtend(endKey));
            // Replace null start key with zeroes
            b.put(new KeyRange(FOUR_ZERO_BYTES, endBuf), serverName);
        } else if (null == endKey) {
            logger.debug("Found HRegionInfo with null endKey on server {}: {}", serverName, regionInfo);
            Preconditions.checkState(null == nullEnd);
            nullEnd = location;
            // Replace null end key with zeroes
            b.put(new KeyRange(StaticArrayBuffer.of(zeroExtend(startKey)), FOUR_ZERO_BYTES), serverName);
        } else {
            // Convert HBase's inclusive end keys into exclusive JanusGraph end keys
            StaticBuffer startBuf = StaticArrayBuffer.of(zeroExtend(startKey));
            StaticBuffer endBuf = StaticArrayBuffer.of(zeroExtend(endKey));
            KeyRange kr = new KeyRange(startBuf, endBuf);
            b.put(kr, serverName);
            logger.debug("Found HRegionInfo with non-null end and start keys on server {}: {}", serverName, regionInfo);
        }
    }
    // Require either no null key bounds or a pair of them
    Preconditions.checkState((null == nullStart) == (null == nullEnd));
    // Check that every key in the result is at least 4 bytes long
    Map<KeyRange, ServerName> result = b.build();
    for (KeyRange kr : result.keySet()) {
        Preconditions.checkState(4 <= kr.getStart().length());
        Preconditions.checkState(4 <= kr.getEnd().length());
    }
    return result;
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) KeyRange(org.janusgraph.diskstorage.keycolumnvalue.KeyRange) ServerName(org.apache.hadoop.hbase.ServerName) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) ImmutableMap(com.google.common.collect.ImmutableMap)
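
A worked sketch of the zero-padding rule described in the javadoc: region keys shorter than 4 bytes get trailing zero bytes appended before being wrapped in a StaticBuffer. The zeroExtend helper below is a hypothetical stand-in for the private method used by normalizeKeyBounds, written here only to illustrate the rule.

import java.util.Arrays;

import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.util.StaticArrayBuffer;

public class KeyPaddingSketch {

    // Hypothetical re-implementation of the padding rule for illustration:
    // Arrays.copyOf pads the copy with 0x00 bytes up to the requested length.
    static byte[] zeroExtend(byte[] key) {
        return key.length >= 4 ? key : Arrays.copyOf(key, 4);
    }

    public static void main(String[] args) {
        // A two-byte region start key becomes [0x12, 0x34, 0x00, 0x00].
        byte[] regionStart = new byte[] { 0x12, 0x34 };
        StaticBuffer startBuf = StaticArrayBuffer.of(zeroExtend(regionStart));
        System.out.println(startBuf.length()); // 4

        // Keys that are already at least 4 bytes long are left unchanged.
        byte[] longKey = new byte[] { 1, 2, 3, 4, 5 };
        System.out.println(StaticArrayBuffer.of(zeroExtend(longKey)).length()); // 5
    }
}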

Aggregations

StaticBuffer (org.janusgraph.diskstorage.StaticBuffer): 45
Test (org.junit.Test): 17
Entry (org.janusgraph.diskstorage.Entry): 14
ArrayList (java.util.ArrayList): 10
ReadBuffer (org.janusgraph.diskstorage.ReadBuffer): 8
DataOutput (org.janusgraph.graphdb.database.serialize.DataOutput): 8
SliceQuery (org.janusgraph.diskstorage.keycolumnvalue.SliceQuery): 7
ByteBuffer (java.nio.ByteBuffer): 6
List (java.util.List): 6
Map (java.util.Map): 6
ResultSet (com.datastax.driver.core.ResultSet): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 5
BackendException (org.janusgraph.diskstorage.BackendException): 5
Row (com.datastax.driver.core.Row): 4
Tuple (io.vavr.Tuple): 4
Array (io.vavr.collection.Array): 4
IOException (java.io.IOException): 4
Instant (java.time.Instant): 4
HashMap (java.util.HashMap): 4
Random (java.util.Random): 4