Example 41 with Entry

Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.

From the class InMemoryColumnValueStoreTest, method testMultipageUpdateDelete:

@Test
public void testMultipageUpdateDelete() throws TemporaryLockingException {
    int numEntries = 2511;
    StoreTransaction txh = mock(StoreTransaction.class);
    BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
    when(txh.getConfiguration()).thenReturn(mockConfig);
    when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);
    InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
    // ColumnValueStore cvs = new DeflatedEntryColumnValueStore(false);
    List<Entry> additions = generateEntries(0, numEntries, "orig");
    cvs.mutate(additions, Collections.emptyList(), txh);
    // if we pass COL_END, it doesn't get included
    EntryList result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size(), result.size());
    int windowStart = 494;
    int windowEnd = 2002;
    // update
    List<Entry> updates = generateEntries(windowStart, windowEnd, "updated");
    cvs.mutate(updates, Collections.emptyList(), txh);
    // if we pass COL_END, it doesn't get included
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size(), result.size());
    for (int i = 0; i < result.size(); i++) {
        if (windowStart < i && i < windowEnd) {
            assertEquals(updates.get(i - windowStart), result.get(i));
        } else {
            assertEquals(additions.get(i), result.get(i));
        }
    }
    // delete
    List<StaticBuffer> deletions = new ArrayList<>(windowEnd - windowStart);
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    // if we pass COL_END, it doesn't get included
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - deletions.size(), result.size());
}
Also used: BufferPageTest.makeEntry (org.janusgraph.diskstorage.inmemory.BufferPageTest.makeEntry), Entry (org.janusgraph.diskstorage.Entry), StoreTransaction (org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction), ArrayList (java.util.ArrayList), BaseTransactionConfig (org.janusgraph.diskstorage.BaseTransactionConfig), StaticBuffer (org.janusgraph.diskstorage.StaticBuffer), BufferPageTest.makeStaticBuffer (org.janusgraph.diskstorage.inmemory.BufferPageTest.makeStaticBuffer), EntryList (org.janusgraph.diskstorage.EntryList), KeySliceQuery (org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery), Test (org.junit.jupiter.api.Test)
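
The test above leans on helpers (generateEntries, makeStaticBuffer, VERY_START, VERY_END) defined elsewhere in the JanusGraph test sources. As a rough orientation only, a generateEntries-style helper could look like the sketch below; the column encoding, the zero padding, and the use of StaticArrayEntry.of as the Entry factory are assumptions for illustration, not the project's actual helper.

// Hypothetical sketch of a generateEntries-style helper (not the JanusGraph test code).
// Assumes makeStaticBuffer(String) from BufferPageTest and StaticArrayEntry.of(column, value).
private static List<Entry> generateEntries(int from, int to, String valuePrefix) {
    List<Entry> entries = new ArrayList<>(to - from);
    for (int i = from; i < to; i++) {
        // Zero-padded column names keep lexicographic order aligned with numeric order,
        // which is what the slice query over [VERY_START, VERY_END) relies on.
        StaticBuffer column = makeStaticBuffer(String.format("col%08d", i));
        StaticBuffer value = makeStaticBuffer(valuePrefix + i);
        entries.add(StaticArrayEntry.of(column, value));
    }
    return entries;
}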

Example 42 with Entry

Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.

From the class InMemoryColumnValueStoreTest, method testPagingAndFragmentation:

@Test
public void testPagingAndFragmentation() throws TemporaryLockingException {
    int pageSize = InMemoryColumnValueStore.DEF_PAGE_SIZE;
    StoreTransaction txh = mock(StoreTransaction.class);
    BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
    when(txh.getConfiguration()).thenReturn(mockConfig);
    when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);
    InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
    List<Entry> additions = generateEntries(0, pageSize * 5 + pageSize / 2, "orig");
    cvs.mutate(additions, Collections.emptyList(), txh);
    // We inserted more than pageSize entries in one go, so the store should switch to a multipage buffer. However, as currently
    // implemented, the data is not immediately broken up into multiple pages; it is kept in a single "oversized" page.
    // The break-up only happens on a subsequent update.
    assertEquals(1, cvs.numPages(txh));
    // emulate update so that the single "oversized" page will be broken up into multiple pages of correct size
    cvs.mutate(additions.subList(1, 3), Collections.emptyList(), txh);
    assertEquals(6, cvs.numPages(txh));
    int numDeleted = 0;
    int windowStart = pageSize - pageSize / 3;
    int windowEnd = pageSize + pageSize / 3;
    // this should remove parts of page0 and page1
    List<StaticBuffer> deletions = new ArrayList<>(windowEnd - windowStart);
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    numDeleted += windowEnd - windowStart;
    // if we pass COL_END, it doesn't get included
    EntryList result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result.size());
    SharedEntryBufferFragmentationReport report = cvs.createFragmentationReport(txh);
    assertEquals(6, report.getPageCount());
    assertEquals(3, report.getFragmentedPageCount());
    // since only 1/3 of each page is removed, the remains won't fit into one page anyway, so not deemed compressable
    assertEquals(0, report.getCompressableChunksCount());
    assertEquals(0, report.getCompressablePageCount());
    windowStart = pageSize * 4 - pageSize / 3;
    windowEnd = pageSize * 4 + pageSize / 3;
    // this should remove parts of page3 and page4
    deletions.clear();
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    numDeleted += windowEnd - windowStart;
    // if we pass COL_END, it doesn't get included
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result.size());
    report = cvs.createFragmentationReport(txh);
    assertEquals(6, report.getPageCount());
    assertEquals(5, report.getFragmentedPageCount());
    // we now have pages 3 & 4 which are 2/3 full, PLUS page 5 which is half full => 3 pages compressable into 2
    assertEquals(1, report.getCompressableChunksCount());
    assertEquals(3, report.getCompressablePageCount());
    assertEquals(1, report.getAchievablePageReduction());
    cvs.quickDefragment(txh);
    // if we pass COL_END, it doesn't get included
    EntryList result2 = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result2.size());
    for (int i = 0; i < result2.size(); i++) {
        assertEquals(result.get(i), result2.get(i));
    }
    // after quick defrag, we should have 5 pages in total, page 0 & 1 still fragmented, page 4 also not full
    report = cvs.createFragmentationReport(txh);
    assertEquals(5, report.getPageCount());
    assertEquals(3, report.getFragmentedPageCount());
    assertEquals(0, report.getCompressableChunksCount());
    assertEquals(0, report.getCompressablePageCount());
    assertEquals(0, report.getAchievablePageReduction());
    windowStart = pageSize - pageSize / 2;
    windowEnd = pageSize + pageSize / 2 + 1;
    // this should remove half of page0 and page1 each
    deletions.clear();
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    numDeleted += (pageSize / 2 - pageSize / 3) * 2 + 1;
    // if we pass COL_END, it doesn't get included
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result.size());
    // now the first two pages should become collapsible into one
    report = cvs.createFragmentationReport(txh);
    assertEquals(5, report.getPageCount());
    assertEquals(3, report.getFragmentedPageCount());
    assertEquals(1, report.getCompressableChunksCount());
    assertEquals(2, report.getCompressablePageCount());
    assertEquals(1, report.getAchievablePageReduction());
    cvs.quickDefragment(txh);
    // if we pass COL_END, it doesn't get included
    result2 = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"),
        makeStaticBuffer(VERY_START), makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - numDeleted, result2.size());
    for (int i = 0; i < result2.size(); i++) {
        assertEquals(result.get(i), result2.get(i));
    }
    // the first two pages collapsed into one, which is one entry short of full
    report = cvs.createFragmentationReport(txh);
    assertEquals(4, report.getPageCount());
    assertEquals(2, report.getFragmentedPageCount());
    assertEquals(0, report.getCompressableChunksCount());
    assertEquals(0, report.getCompressablePageCount());
    assertEquals(0, report.getAchievablePageReduction());
}
Also used: BufferPageTest.makeEntry (org.janusgraph.diskstorage.inmemory.BufferPageTest.makeEntry), Entry (org.janusgraph.diskstorage.Entry), StoreTransaction (org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction), ArrayList (java.util.ArrayList), BaseTransactionConfig (org.janusgraph.diskstorage.BaseTransactionConfig), StaticBuffer (org.janusgraph.diskstorage.StaticBuffer), BufferPageTest.makeStaticBuffer (org.janusgraph.diskstorage.inmemory.BufferPageTest.makeStaticBuffer), EntryList (org.janusgraph.diskstorage.EntryList), KeySliceQuery (org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery), Test (org.junit.jupiter.api.Test)
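
As a usage note, the fragmentation report exercised above can drive a conditional maintenance pass. The sketch below is assembled only from calls that appear in this test (createFragmentationReport, getAchievablePageReduction, quickDefragment, numPages) and is an illustration rather than a recommendation from the JanusGraph project; cvs and txh are assumed to be set up as in the test.

// Minimal sketch: only defragment when the report says at least one page can be reclaimed.
SharedEntryBufferFragmentationReport report = cvs.createFragmentationReport(txh);
if (report.getAchievablePageReduction() > 0) {
    cvs.quickDefragment(txh);
}
// numPages(txh) can be checked afterwards to verify the reduction actually happened.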

Example 43 with Entry

Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.

From the class StandardJanusGraph, method prepareCommit:

public ModificationSummary prepareCommit(final Collection<InternalRelation> addedRelations, final Collection<InternalRelation> deletedRelations, final Predicate<InternalRelation> filter, final BackendTransaction mutator, final StandardJanusGraphTx tx, final boolean acquireLocks) throws BackendException {
    ListMultimap<Long, InternalRelation> mutations = ArrayListMultimap.create();
    ListMultimap<InternalVertex, InternalRelation> mutatedProperties = ArrayListMultimap.create();
    List<IndexSerializer.IndexUpdate> indexUpdates = Lists.newArrayList();
    // 1) Collect deleted edges and their index updates and acquire edge locks
    for (InternalRelation del : Iterables.filter(deletedRelations, filter)) {
        Preconditions.checkArgument(del.isRemoved());
        for (int pos = 0; pos < del.getLen(); pos++) {
            InternalVertex vertex = del.getVertex(pos);
            if (pos == 0 || !del.isLoop()) {
                if (del.isProperty())
                    mutatedProperties.put(vertex, del);
                mutations.put(vertex.longId(), del);
            }
            if (acquireLock(del, pos, acquireLocks)) {
                Entry entry = edgeSerializer.writeRelation(del, pos, tx);
                mutator.acquireEdgeLock(idManager.getKey(vertex.longId()), entry);
            }
        }
        indexUpdates.addAll(indexSerializer.getIndexUpdates(del));
    }
    // 2) Collect added edges and their index updates and acquire edge locks
    for (InternalRelation add : Iterables.filter(addedRelations, filter)) {
        Preconditions.checkArgument(add.isNew());
        for (int pos = 0; pos < add.getLen(); pos++) {
            InternalVertex vertex = add.getVertex(pos);
            if (pos == 0 || !add.isLoop()) {
                if (add.isProperty())
                    mutatedProperties.put(vertex, add);
                mutations.put(vertex.longId(), add);
            }
            if (!vertex.isNew() && acquireLock(add, pos, acquireLocks)) {
                Entry entry = edgeSerializer.writeRelation(add, pos, tx);
                mutator.acquireEdgeLock(idManager.getKey(vertex.longId()), entry.getColumn());
            }
        }
        indexUpdates.addAll(indexSerializer.getIndexUpdates(add));
    }
    // 3) Collect all index update for vertices
    for (InternalVertex v : mutatedProperties.keySet()) {
        indexUpdates.addAll(indexSerializer.getIndexUpdates(v, mutatedProperties.get(v)));
    }
    // 4) Acquire index locks (deletions first)
    for (IndexSerializer.IndexUpdate update : indexUpdates) {
        if (!update.isCompositeIndex() || !update.isDeletion())
            continue;
        CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
        if (acquireLock(iIndex, acquireLocks)) {
            mutator.acquireIndexLock((StaticBuffer) update.getKey(), (Entry) update.getEntry());
        }
    }
    for (IndexSerializer.IndexUpdate update : indexUpdates) {
        if (!update.isCompositeIndex() || !update.isAddition())
            continue;
        CompositeIndexType iIndex = (CompositeIndexType) update.getIndex();
        if (acquireLock(iIndex, acquireLocks)) {
            mutator.acquireIndexLock((StaticBuffer) update.getKey(), ((Entry) update.getEntry()).getColumn());
        }
    }
    // 5) Add relation mutations
    for (Long vertexId : mutations.keySet()) {
        Preconditions.checkArgument(vertexId > 0, "Vertex has no id: %s", vertexId);
        final List<InternalRelation> edges = mutations.get(vertexId);
        final List<Entry> additions = new ArrayList<>(edges.size());
        final List<Entry> deletions = new ArrayList<>(Math.max(10, edges.size() / 10));
        for (final InternalRelation edge : edges) {
            final InternalRelationType baseType = (InternalRelationType) edge.getType();
            assert baseType.getBaseType() == null;
            for (InternalRelationType type : baseType.getRelationIndexes()) {
                if (type.getStatus() == SchemaStatus.DISABLED)
                    continue;
                for (int pos = 0; pos < edge.getArity(); pos++) {
                    if (!type.isUnidirected(Direction.BOTH) && !type.isUnidirected(EdgeDirection.fromPosition(pos)))
                        // Directionality is not covered
                        continue;
                    if (edge.getVertex(pos).longId() == vertexId) {
                        StaticArrayEntry entry = edgeSerializer.writeRelation(edge, type, pos, tx);
                        if (edge.isRemoved()) {
                            deletions.add(entry);
                        } else {
                            Preconditions.checkArgument(edge.isNew());
                            int ttl = getTTL(edge);
                            if (ttl > 0) {
                                entry.setMetaData(EntryMetaData.TTL, ttl);
                            }
                            additions.add(entry);
                        }
                    }
                }
            }
        }
        StaticBuffer vertexKey = idManager.getKey(vertexId);
        mutator.mutateEdges(vertexKey, additions, deletions);
    }
    // 6) Add index updates
    boolean has2iMods = false;
    for (IndexSerializer.IndexUpdate indexUpdate : indexUpdates) {
        assert indexUpdate.isAddition() || indexUpdate.isDeletion();
        if (indexUpdate.isCompositeIndex()) {
            final IndexSerializer.IndexUpdate<StaticBuffer, Entry> update = indexUpdate;
            if (update.isAddition())
                mutator.mutateIndex(update.getKey(), Lists.newArrayList(update.getEntry()), KCVSCache.NO_DELETIONS);
            else
                mutator.mutateIndex(update.getKey(), KeyColumnValueStore.NO_ADDITIONS, Lists.newArrayList(update.getEntry()));
        } else {
            final IndexSerializer.IndexUpdate<String, IndexEntry> update = indexUpdate;
            has2iMods = true;
            IndexTransaction itx = mutator.getIndexTransaction(update.getIndex().getBackingIndexName());
            String indexStore = ((MixedIndexType) update.getIndex()).getStoreName();
            if (update.isAddition())
                itx.add(indexStore, update.getKey(), update.getEntry(), update.getElement().isNew());
            else
                itx.delete(indexStore, update.getKey(), update.getEntry().field, update.getEntry().value, update.getElement().isRemoved());
        }
    }
    return new ModificationSummary(!mutations.isEmpty(), has2iMods);
}
Also used: LongArrayList (com.carrotsearch.hppc.LongArrayList), ArrayList (java.util.ArrayList), IndexTransaction (org.janusgraph.diskstorage.indexing.IndexTransaction), IndexEntry (org.janusgraph.diskstorage.indexing.IndexEntry), InternalRelation (org.janusgraph.graphdb.internal.InternalRelation), StaticArrayEntry (org.janusgraph.diskstorage.util.StaticArrayEntry), Entry (org.janusgraph.diskstorage.Entry), StaticBuffer (org.janusgraph.diskstorage.StaticBuffer), MixedIndexType (org.janusgraph.graphdb.types.MixedIndexType), AtomicLong (java.util.concurrent.atomic.AtomicLong), InternalVertex (org.janusgraph.graphdb.internal.InternalVertex), CompositeIndexType (org.janusgraph.graphdb.types.CompositeIndexType), InternalRelationType (org.janusgraph.graphdb.internal.InternalRelationType)
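
Everything prepareCommit hands to the mutator is ultimately an org.janusgraph.diskstorage.Entry, i.e. an immutable column/value pair over StaticBuffers. The fragment below is a hedged sketch of constructing and reading one; it assumes the StaticArrayEntry.of(column, value) and StaticArrayBuffer.of(byte[]) factories plus a java.nio.charset.StandardCharsets import, and is meant only to illustrate the data shape, not JanusGraph internals.

// Hedged sketch of the Entry data shape (assumes StaticArrayEntry.of and StaticArrayBuffer.of).
StaticBuffer column = StaticArrayBuffer.of("someColumn".getBytes(StandardCharsets.UTF_8));
StaticBuffer value = StaticArrayBuffer.of("someValue".getBytes(StandardCharsets.UTF_8));
Entry entry = StaticArrayEntry.of(column, value);
StaticBuffer lockColumn = entry.getColumn();   // what acquireEdgeLock uses for additions above
StaticBuffer payload = entry.getValue();       // the serialized relation produced by writeRelation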

Example 44 with Entry

Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.

From the class ModificationDeserializer, method parseRelation:

public static InternalRelation parseRelation(TransactionLogHeader.Modification modification, StandardJanusGraphTx tx) {
    Change state = modification.state;
    assert state.isProper();
    long outVertexId = modification.outVertexId;
    Entry relEntry = modification.relationEntry;
    InternalVertex outVertex = tx.getInternalVertex(outVertexId);
    // Special relation parsing, compare to {@link RelationConstructor}
    RelationCache relCache = tx.getEdgeSerializer().readRelation(relEntry, false, tx);
    assert relCache.direction == Direction.OUT;
    InternalRelationType type = (InternalRelationType) tx.getExistingRelationType(relCache.typeId);
    assert type.getBaseType() == null;
    InternalRelation rel;
    if (type.isPropertyKey()) {
        if (state == Change.REMOVED) {
            rel = new StandardVertexProperty(relCache.relationId, (PropertyKey) type, outVertex, relCache.getValue(), ElementLifeCycle.Removed);
        } else {
            rel = new CacheVertexProperty(relCache.relationId, (PropertyKey) type, outVertex, relCache.getValue(), relEntry);
        }
    } else {
        assert type.isEdgeLabel();
        InternalVertex otherVertex = tx.getInternalVertex(relCache.getOtherVertexId());
        if (state == Change.REMOVED) {
            rel = new StandardEdge(relCache.relationId, (EdgeLabel) type, outVertex, otherVertex, ElementLifeCycle.Removed);
        } else {
            rel = new CacheEdge(relCache.relationId, (EdgeLabel) type, outVertex, otherVertex, relEntry);
        }
    }
    if (state == Change.REMOVED && relCache.hasProperties()) {
        // copy over properties
        for (LongObjectCursor<Object> entry : relCache) {
            rel.setPropertyDirect(tx.getExistingPropertyKey(entry.key), entry.value);
        }
    }
    return rel;
}
Also used: RelationCache (org.janusgraph.graphdb.relations.RelationCache), EdgeLabel (org.janusgraph.core.EdgeLabel), StandardVertexProperty (org.janusgraph.graphdb.relations.StandardVertexProperty), Change (org.janusgraph.core.log.Change), InternalRelation (org.janusgraph.graphdb.internal.InternalRelation), StandardEdge (org.janusgraph.graphdb.relations.StandardEdge), Entry (org.janusgraph.diskstorage.Entry), CacheVertexProperty (org.janusgraph.graphdb.relations.CacheVertexProperty), CacheEdge (org.janusgraph.graphdb.relations.CacheEdge), InternalVertex (org.janusgraph.graphdb.internal.InternalVertex), InternalRelationType (org.janusgraph.graphdb.internal.InternalRelationType), PropertyKey (org.janusgraph.core.PropertyKey)
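
Below is a hedged sketch of how a log consumer might apply parseRelation. The modifications collection and the tx handle are assumed to be provided by the surrounding transaction-log machinery and do not appear in the method above.

// Hypothetical consumer loop ('modifications' and 'tx' are assumed inputs, not JanusGraph API).
List<InternalRelation> added = new ArrayList<>();
List<InternalRelation> removed = new ArrayList<>();
for (TransactionLogHeader.Modification modification : modifications) {
    InternalRelation rel = ModificationDeserializer.parseRelation(modification, tx);
    // The same state field that parseRelation inspects decides which bucket the relation lands in.
    if (modification.state == Change.REMOVED) {
        removed.add(rel);
    } else {
        added.add(rel);
    }
}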

Example 45 with Entry

Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.

From the class IndexRemoveJob, method process:

@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    // The queries are already tailored enough => everything should be removed
    try {
        BackendTransaction mutator = writeTx.getTxHandle();
        final List<Entry> deletions;
        if (entries.size() == 1) {
            deletions = entries.values().iterator().next();
        } else {
            final int size = IteratorUtils.stream(entries.values().iterator()).map(List::size).reduce(0, Integer::sum);
            deletions = new ArrayList<>(size);
            entries.values().forEach(deletions::addAll);
        }
        metrics.incrementCustom(DELETED_RECORDS_COUNT, deletions.size());
        if (isRelationTypeIndex()) {
            mutator.mutateEdges(key, KCVSCache.NO_ADDITIONS, deletions);
        } else {
            mutator.mutateIndex(key, KCVSCache.NO_ADDITIONS, deletions);
        }
    } catch (final Exception e) {
        managementSystem.rollback();
        writeTx.rollback();
        metrics.incrementCustom(FAILED_TX);
        throw new JanusGraphException(e.getMessage(), e);
    }
}
Also used: Entry (org.janusgraph.diskstorage.Entry), JanusGraphException (org.janusgraph.core.JanusGraphException), BackendTransaction (org.janusgraph.diskstorage.BackendTransaction)
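
The only non-trivial step in process is collapsing the Map<SliceQuery, EntryList> values into a single deletion list. The same flattening can be written as one stream pipeline; this is an equivalent illustrative rewrite in plain Java (EntryList extends List<Entry>), not the code JanusGraph ships.

// Equivalent flattening of all EntryList values into one List<Entry> (requires java.util.stream.Collectors).
List<Entry> deletions = entries.values().stream()
        .flatMap(List::stream)
        .collect(Collectors.toList());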

Aggregations

Entry (org.janusgraph.diskstorage.Entry): 62
StaticBuffer (org.janusgraph.diskstorage.StaticBuffer): 36
StaticArrayEntry (org.janusgraph.diskstorage.util.StaticArrayEntry): 29
Test (org.junit.jupiter.api.Test): 23
ArrayList (java.util.ArrayList): 22
StoreTransaction (org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction): 19
KeySliceQuery (org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery): 16
EntryList (org.janusgraph.diskstorage.EntryList): 15
HashMap (java.util.HashMap): 12
Map (java.util.Map): 11
BackendException (org.janusgraph.diskstorage.BackendException): 10
List (java.util.List): 9
KCVMutation (org.janusgraph.diskstorage.keycolumnvalue.KCVMutation): 9
BaseTransactionConfig (org.janusgraph.diskstorage.BaseTransactionConfig): 8
BufferPageTest.makeEntry (org.janusgraph.diskstorage.inmemory.BufferPageTest.makeEntry): 8
Instant (java.time.Instant): 7
BackendOperation (org.janusgraph.diskstorage.util.BackendOperation): 6
BufferPageTest.makeStaticBuffer (org.janusgraph.diskstorage.inmemory.BufferPageTest.makeStaticBuffer): 5
StaticArrayBuffer (org.janusgraph.diskstorage.util.StaticArrayBuffer): 5
StaticArrayEntryList (org.janusgraph.diskstorage.util.StaticArrayEntryList): 5