
Example 56 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

The class HBaseStoreManagerMutationTest, method testMutationToPutsTTL.

@Test
public void testMutationToPutsTTL() throws Exception {
    final Map<String, Map<StaticBuffer, KCVMutation>> storeMutationMap = new HashMap<>();
    final Map<StaticBuffer, KCVMutation> rowkeyMutationMap = new HashMap<>();
    final List<Long> expectedColumnsWithTTL = new ArrayList<>();
    final List<Long> putColumnsWithTTL = new ArrayList<>();
    List<Entry> additions = new ArrayList<>();
    List<StaticBuffer> deletions = new ArrayList<>();
    StaticBuffer rowkey = KeyColumnValueStoreUtil.longToByteBuffer(0);
    StaticBuffer col = KeyColumnValueStoreUtil.longToByteBuffer(1);
    StaticBuffer val = KeyColumnValueStoreUtil.longToByteBuffer(2);
    StaticArrayEntry e = (StaticArrayEntry) StaticArrayEntry.of(col, val);
    // Test a TTL of Integer.MAX_VALUE / 1000 + 1 seconds:
    // converting this value from seconds to milliseconds overflows an int
    e.setMetaData(EntryMetaData.TTL, Integer.MAX_VALUE / 1000 + 1);
    Integer ttl = (Integer) e.getMetaData().get(EntryMetaData.TTL);
    // convert seconds to milliseconds as a long to avoid the overflow
    expectedColumnsWithTTL.add(TimeUnit.SECONDS.toMillis((long) ttl));
    additions.add(e);
    deletions.add(e);
    rowkeyMutationMap.put(rowkey, new KCVMutation(additions, deletions));
    storeMutationMap.put("store1", rowkeyMutationMap);
    HBaseStoreManager manager = new HBaseStoreManager(hBaseContainer.getModifiableConfiguration());
    final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerRowKey = manager.convertToCommands(storeMutationMap, 0L, 0L);
    Pair<List<Put>, Delete> commands = commandsPerRowKey.values().iterator().next();
    // Verify Put TTL
    Put put = commands.getFirst().get(0);
    putColumnsWithTTL.add(put.getTTL());
    assertArrayEquals(expectedColumnsWithTTL.toArray(), putColumnsWithTTL.toArray());
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) KCVMutation(org.janusgraph.diskstorage.keycolumnvalue.KCVMutation) StaticArrayEntry(org.janusgraph.diskstorage.util.StaticArrayEntry) Put(org.apache.hadoop.hbase.client.Put) Entry(org.janusgraph.diskstorage.Entry) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) List(java.util.List) Map(java.util.Map) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.jupiter.api.Test)
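
The subtlety this test covers is spelled out in the comments above: a TTL of Integer.MAX_VALUE / 1000 + 1 seconds no longer fits in an int once converted to milliseconds, so the conversion must be done in long arithmetic. A minimal standalone sketch of the overflow, using only the JDK (java.util.concurrent.TimeUnit); the variable names are illustrative and not part of the test:

    int ttlSeconds = Integer.MAX_VALUE / 1000 + 1;                   // 2147484 seconds
    long ttlMillis = TimeUnit.SECONDS.toMillis((long) ttlSeconds);   // 2147484000 ms, larger than Integer.MAX_VALUE
    int naiveMillis = ttlSeconds * 1000;                             // int arithmetic overflows and wraps to a negative value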

Example 57 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

The class HBaseStoreManagerMutationTest, method testKCVMutationToPuts.

@Test
public void testKCVMutationToPuts() throws Exception {
    final Map<String, Map<StaticBuffer, KCVMutation>> storeMutationMap = new HashMap<>();
    final Map<StaticBuffer, KCVMutation> rowkeyMutationMap = new HashMap<>();
    final List<Long> expectedColumnsWithTTL = new ArrayList<>();
    final List<Long> expectedColumnsWithoutTTL = new ArrayList<>();
    final List<Long> expectedColumnDelete = new ArrayList<>();
    StaticArrayEntry e = null;
    StaticBuffer rowkey, col, val;
    // 2 rows
    for (int row = 0; row < 2; row++) {
        rowkey = KeyColumnValueStoreUtil.longToByteBuffer(row);
        List<Entry> additions = new ArrayList<>();
        List<StaticBuffer> deletions = new ArrayList<>();
        // 100 columns each row
        int i;
        for (i = 0; i < 100; i++) {
            col = KeyColumnValueStoreUtil.longToByteBuffer(i);
            val = KeyColumnValueStoreUtil.longToByteBuffer(i + 100);
            e = (StaticArrayEntry) StaticArrayEntry.of(col, val);
            // Set half of the columns with TTL, also vary the TTL values
            if (i % 2 == 0) {
                e.setMetaData(EntryMetaData.TTL, i % 10 + 1);
                // Collect the columns with TTL. Only do this for one row
                if (row == 1) {
                    expectedColumnsWithTTL.add((long) i);
                }
            } else {
                // Collect the columns without TTL. Only do this for one row
                if (row == 1) {
                    expectedColumnsWithoutTTL.add((long) i);
                }
            }
            additions.add(e);
        }
        // Add one deletion (the last column written) to each row; record the expected delete only for one row
        if (row == 1) {
            expectedColumnDelete.add((long) (i - 1));
        }
        deletions.add(e);
        rowkeyMutationMap.put(rowkey, new KCVMutation(additions, deletions));
    }
    storeMutationMap.put("store1", rowkeyMutationMap);
    HBaseStoreManager manager = new HBaseStoreManager(hBaseContainer.getModifiableConfiguration());
    final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerRowKey = manager.convertToCommands(storeMutationMap, 0L, 0L);
    // 2 rows
    assertEquals(2, commandsPerRowKey.size());
    // Verify puts
    final List<Long> putColumnsWithTTL = new ArrayList<>();
    final List<Long> putColumnsWithoutTTL = new ArrayList<>();
    Pair<List<Put>, Delete> commands = commandsPerRowKey.values().iterator().next();
    long colName;
    for (Put p : commands.getFirst()) {
        // In Put, Long.MAX_VALUE means no TTL
        for (Map.Entry<byte[], List<Cell>> me : p.getFamilyCellMap().entrySet()) {
            for (Cell c : me.getValue()) {
                colName = KeyColumnValueStoreUtil.bufferToLong(new StaticArrayBuffer(CellUtil.cloneQualifier(c)));
                if (p.getTTL() < Long.MAX_VALUE) {
                    putColumnsWithTTL.add(colName);
                } else {
                    putColumnsWithoutTTL.add(colName);
                }
            }
        }
    }
    Collections.sort(putColumnsWithoutTTL);
    Collections.sort(putColumnsWithTTL);
    assertArrayEquals(expectedColumnsWithoutTTL.toArray(), putColumnsWithoutTTL.toArray());
    assertArrayEquals(expectedColumnsWithTTL.toArray(), putColumnsWithTTL.toArray());
    // Verify deletes
    final List<Long> deleteColumns = new ArrayList<>();
    Delete d = commands.getSecond();
    for (Map.Entry<byte[], List<Cell>> me : d.getFamilyCellMap().entrySet()) {
        for (Cell c : me.getValue()) {
            colName = KeyColumnValueStoreUtil.bufferToLong(new StaticArrayBuffer(CellUtil.cloneQualifier(c)));
            deleteColumns.add(colName);
        }
    }
    Collections.sort(deleteColumns);
    assertArrayEquals(expectedColumnDelete.toArray(), deleteColumns.toArray());
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) StaticArrayEntry(org.janusgraph.diskstorage.util.StaticArrayEntry) Entry(org.janusgraph.diskstorage.Entry) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) Pair(org.apache.hadoop.hbase.util.Pair) StaticArrayBuffer(org.janusgraph.diskstorage.util.StaticArrayBuffer) KCVMutation(org.janusgraph.diskstorage.keycolumnvalue.KCVMutation) Put(org.apache.hadoop.hbase.client.Put) Map(java.util.Map) Test(org.junit.jupiter.api.Test)
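
The loop above decides whether a column had a TTL by comparing p.getTTL() with Long.MAX_VALUE, which is the HBase convention for "no TTL set" on a Mutation. A minimal sketch of that convention (rowBytes is a hypothetical row key, not taken from the test):

    byte[] rowBytes = new byte[] { 1 };          // hypothetical row key
    Put p = new Put(rowBytes);
    long noTtl = p.getTTL();                     // Long.MAX_VALUE: no TTL has been set
    p.setTTL(TimeUnit.SECONDS.toMillis(30));     // Put TTLs are specified in milliseconds
    long withTtl = p.getTTL();                   // 30000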

Example 58 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

The class InMemoryColumnValueStoreTest, method testMultipageDelete.

@Test
public void testMultipageDelete() throws TemporaryLockingException {
    int numEntries = 1001;
    StoreTransaction txh = mock(StoreTransaction.class);
    BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
    when(txh.getConfiguration()).thenReturn(mockConfig);
    when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);
    InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
    // ColumnValueStore cvs = new DeflatedEntryColumnValueStore(false);
    List<Entry> additions = generateEntries(0, numEntries, "orig");
    cvs.mutate(additions, Collections.emptyList(), txh);
    EntryList result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size(), result.size());
    int windowStart = 494;
    int windowEnd = 501;
    List<StaticBuffer> deletions = new ArrayList<>(windowEnd - windowStart);
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - deletions.size(), result.size());
}
Also used : BufferPageTest.makeEntry(org.janusgraph.diskstorage.inmemory.BufferPageTest.makeEntry) Entry(org.janusgraph.diskstorage.Entry) StoreTransaction(org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction) ArrayList(java.util.ArrayList) BaseTransactionConfig(org.janusgraph.diskstorage.BaseTransactionConfig) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) BufferPageTest.makeStaticBuffer(org.janusgraph.diskstorage.inmemory.BufferPageTest.makeStaticBuffer) EntryList(org.janusgraph.diskstorage.EntryList) KeySliceQuery(org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery) Test(org.junit.jupiter.api.Test)
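
The inline comment about COL_END refers to the slice boundaries: in a KeySliceQuery the start column is inclusive and the end column is exclusive, which is why the tests pass VERY_END rather than the last column itself. A minimal sketch reusing the test's cvs, txh and makeStaticBuffer helper (the column names are made up for illustration):

    // Returns entries with columns in ["colA", "colC"): "colC" itself is not included
    EntryList slice = cvs.getSlice(
            new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer("colA"), makeStaticBuffer("colC")), txh);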

Example 59 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

The class InMemoryColumnValueStoreTest, method testMultipageUpdateDelete.

@Test
public void testMultipageUpdateDelete() throws TemporaryLockingException {
    int numEntries = 2511;
    StoreTransaction txh = mock(StoreTransaction.class);
    BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
    when(txh.getConfiguration()).thenReturn(mockConfig);
    when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);
    InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
    // ColumnValueStore cvs = new DeflatedEntryColumnValueStore(false);
    List<Entry> additions = generateEntries(0, numEntries, "orig");
    cvs.mutate(additions, Collections.emptyList(), txh);
    EntryList result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size(), result.size());
    int windowStart = 494;
    int windowEnd = 2002;
    // update
    List<Entry> updates = generateEntries(windowStart, windowEnd, "updated");
    cvs.mutate(updates, Collections.emptyList(), txh);
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size(), result.size());
    for (int i = 0; i < result.size(); i++) {
        if (windowStart < i && i < windowEnd) {
            assertEquals(updates.get(i - windowStart), result.get(i));
        } else {
            assertEquals(additions.get(i), result.get(i));
        }
    }
    // delete
    List<StaticBuffer> deletions = new ArrayList<>(windowEnd - windowStart);
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - deletions.size(), result.size());
}
Also used : BufferPageTest.makeEntry(org.janusgraph.diskstorage.inmemory.BufferPageTest.makeEntry) Entry(org.janusgraph.diskstorage.Entry) StoreTransaction(org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction) ArrayList(java.util.ArrayList) BaseTransactionConfig(org.janusgraph.diskstorage.BaseTransactionConfig) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) BufferPageTest.makeStaticBuffer(org.janusgraph.diskstorage.inmemory.BufferPageTest.makeStaticBuffer) EntryList(org.janusgraph.diskstorage.EntryList) KeySliceQuery(org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery) Test(org.junit.jupiter.api.Test)
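
The assertions in this test only track sizes, so it helps to spell out the bookkeeping. A short sketch of the arithmetic, with the numbers taken directly from the test:

    int numEntries = 2511;
    int windowStart = 494, windowEnd = 2002;
    // Updating columns that already exist replaces their values, so the row still holds 2511 entries.
    int sizeAfterUpdate = numEntries;                                   // 2511
    // Deleting the window removes windowEnd - windowStart = 1508 columns.
    int sizeAfterDelete = sizeAfterUpdate - (windowEnd - windowStart);  // 2511 - 1508 = 1003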

Example 60 with StaticBuffer

Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.

The class InMemoryColumnValueStoreTest, method testPagingAndFragmentation.

@Test
public void testPagingAndFragmentation() throws TemporaryLockingException {
    int pageSize = InMemoryColumnValueStore.DEF_PAGE_SIZE;
    StoreTransaction txh = mock(StoreTransaction.class);
    BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
    when(txh.getConfiguration()).thenReturn(mockConfig);
    when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);
    InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
    List<Entry> additions = generateEntries(0, pageSize * 5 + pageSize / 2, "orig");
    cvs.mutate(additions, Collections.emptyList(), txh);
    // We inserted more than pagesize in one go, so the store should switch to multipage buffer - but, as currently implemented,
    // this doesn't get immediately broken up into multiple pages, instead it will hold a single "oversized" page.
    // The break-up will only happen if there is a consequent update
    assertEquals(1, cvs.numPages(txh));
    // emulate update so that the single "oversized" page will be broken up into multiple pages of correct size
    cvs.mutate(additions.subList(1, 3), Collections.emptyList(), txh);
    assertEquals(6, cvs.numPages(txh));
    int numDeleted = 0;
    int windowStart = pageSize - pageSize / 3;
    int windowEnd = pageSize + pageSize / 3;
    // this should remove parts of page 0 and page 1
    List<StaticBuffer> deletions = new ArrayList<>(windowEnd - windowStart);
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    numDeleted += windowEnd - windowStart;
    EntryList result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result.size());
    SharedEntryBufferFragmentationReport report = cvs.createFragmentationReport(txh);
    assertEquals(6, report.getPageCount());
    assertEquals(3, report.getFragmentedPageCount());
    // since only 1/3 of each page is removed, the remains won't fit into one page anyway, so not deemed compressable
    assertEquals(0, report.getCompressableChunksCount());
    assertEquals(0, report.getCompressablePageCount());
    windowStart = pageSize * 4 - pageSize / 3;
    windowEnd = pageSize * 4 + pageSize / 3;
    // this should remove parts of page 3 and page 4
    deletions.clear();
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    numDeleted += windowEnd - windowStart;
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result.size());
    report = cvs.createFragmentationReport(txh);
    assertEquals(6, report.getPageCount());
    assertEquals(5, report.getFragmentedPageCount());
    // we now have pages 3 & 4 which are 2/3 full, PLUS page 5 which is half full => 3 pages compressable into 2
    assertEquals(1, report.getCompressableChunksCount());
    assertEquals(3, report.getCompressablePageCount());
    assertEquals(1, report.getAchievablePageReduction());
    cvs.quickDefragment(txh);
    EntryList result2 = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result2.size());
    for (int i = 0; i < result2.size(); i++) {
        assertEquals(result.get(i), result2.get(i));
    }
    // after quick defrag, we should have 5 pages in total, page 0 & 1 still fragmented, page 4 also not full
    report = cvs.createFragmentationReport(txh);
    assertEquals(5, report.getPageCount());
    assertEquals(3, report.getFragmentedPageCount());
    assertEquals(0, report.getCompressableChunksCount());
    assertEquals(0, report.getCompressablePageCount());
    assertEquals(0, report.getAchievablePageReduction());
    windowStart = pageSize - pageSize / 2;
    windowEnd = pageSize + pageSize / 2 + 1;
    // this should remove half of page0 and page1 each
    deletions.clear();
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    numDeleted += (pageSize / 2 - pageSize / 3) * 2 + 1;
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result.size());
    // now two first pages should become collapsible into one
    report = cvs.createFragmentationReport(txh);
    assertEquals(5, report.getPageCount());
    assertEquals(3, report.getFragmentedPageCount());
    assertEquals(1, report.getCompressableChunksCount());
    assertEquals(2, report.getCompressablePageCount());
    assertEquals(1, report.getAchievablePageReduction());
    cvs.quickDefragment(txh);
    result2 = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
            // if we pass COL_END, it doesn't get included
            makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result2.size());
    for (int i = 0; i < result2.size(); i++) {
        assertEquals(result.get(i), result2.get(i));
    }
    // two first pages collapsed into one which is one entry short of full
    report = cvs.createFragmentationReport(txh);
    assertEquals(4, report.getPageCount());
    assertEquals(2, report.getFragmentedPageCount());
    assertEquals(0, report.getCompressableChunksCount());
    assertEquals(0, report.getCompressablePageCount());
    assertEquals(0, report.getAchievablePageReduction());
}
Also used : BufferPageTest.makeEntry(org.janusgraph.diskstorage.inmemory.BufferPageTest.makeEntry) Entry(org.janusgraph.diskstorage.Entry) StoreTransaction(org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction) ArrayList(java.util.ArrayList) BaseTransactionConfig(org.janusgraph.diskstorage.BaseTransactionConfig) StaticBuffer(org.janusgraph.diskstorage.StaticBuffer) BufferPageTest.makeStaticBuffer(org.janusgraph.diskstorage.inmemory.BufferPageTest.makeStaticBuffer) EntryList(org.janusgraph.diskstorage.EntryList) KeySliceQuery(org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery) Test(org.junit.jupiter.api.Test)
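
The expected page counts follow from simple arithmetic on DEF_PAGE_SIZE. A short sketch of the first two assertions, assuming that once the oversized page is broken up each page holds at most pageSize entries:

    int pageSize = InMemoryColumnValueStore.DEF_PAGE_SIZE;
    int entries = pageSize * 5 + pageSize / 2;                    // 5.5 pages' worth of entries
    int pagesAfterBulkInsert = 1;                                 // kept as a single oversized page at first
    int pagesAfterBreakUp = (entries + pageSize - 1) / pageSize;  // ceiling division: 6 pages after the follow-up mutate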

Aggregations

StaticBuffer (org.janusgraph.diskstorage.StaticBuffer): 101
Entry (org.janusgraph.diskstorage.Entry): 36
Test (org.junit.jupiter.api.Test): 36
ArrayList (java.util.ArrayList): 27
HashMap (java.util.HashMap): 20
Map (java.util.Map): 19
StoreTransaction (org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction): 17
KeySliceQuery (org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery): 16
StaticArrayEntry (org.janusgraph.diskstorage.util.StaticArrayEntry): 16
BackendException (org.janusgraph.diskstorage.BackendException): 15
List (java.util.List): 14
EntryList (org.janusgraph.diskstorage.EntryList): 14
TemporaryBackendException (org.janusgraph.diskstorage.TemporaryBackendException): 14
KCVMutation (org.janusgraph.diskstorage.keycolumnvalue.KCVMutation): 13
PermanentBackendException (org.janusgraph.diskstorage.PermanentBackendException): 12
Instant (java.time.Instant): 11
DataOutput (org.janusgraph.graphdb.database.serialize.DataOutput): 10
ReadBuffer (org.janusgraph.diskstorage.ReadBuffer): 8
ConsistentKeyLockStatus (org.janusgraph.diskstorage.locking.consistentkey.ConsistentKeyLockStatus): 7
BackendOperation (org.janusgraph.diskstorage.util.BackendOperation): 7