Use of org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery in the janusgraph project by JanusGraph.
From the class MultiWriteKeyColumnValueStoreTest, method mutateManyWritesSameKeyOnMultipleCFs:
@Test
public void mutateManyWritesSameKeyOnMultipleCFs() throws BackendException {
    // Seed for deriving key/column/value buffers; must be greater than 0.
    final long seed = 42;
    final StaticBuffer key = KeyColumnValueStoreUtil.longToByteBuffer(seed * seed);
    final StaticBuffer col = KeyColumnValueStoreUtil.longToByteBuffer(seed);
    final StaticBuffer nextCol = KeyColumnValueStoreUtil.longToByteBuffer(seed + 1);
    final StaticBuffer val = KeyColumnValueStoreUtil.longToByteBuffer(seed * seed * seed);

    // Write the same single-entry mutation to both column families in one mutateMany call.
    final StoreTransaction directTx = manager.beginTransaction(getTxConfig());
    final KCVMutation addition = new KCVMutation(Lists.newArrayList(StaticArrayEntry.of(col, val)), Lists.newArrayList());
    final Map<StaticBuffer, KCVMutation> mutationsForKey = ImmutableMap.of(key, addition);
    manager.mutateMany(ImmutableMap.of(storeName1, mutationsForKey, storeName2, mutationsForKey), directTx);
    directTx.commit();

    // Both stores must now return the written entry when sliced at that key.
    final KeySliceQuery query = new KeySliceQuery(key, col, nextCol);
    final List<Entry> expected = ImmutableList.of(StaticArrayEntry.of(col, val));
    assertEquals(expected, store1.getSlice(query, tx));
    assertEquals(expected, store2.getSlice(query, tx));
}
Use of org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery in the janusgraph project by JanusGraph.
From the class InMemoryColumnValueStoreTest, method testMultipageUpdateDelete:
@Test
public void testMultipageUpdateDelete() throws TemporaryLockingException {
    // Enough entries to force the store to spread data over multiple pages.
    int numEntries = 2511;
    // Mocked transaction that reports itself as transactional.
    StoreTransaction txh = mock(StoreTransaction.class);
    BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
    when(txh.getConfiguration()).thenReturn(mockConfig);
    when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);
    InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
    List<Entry> additions = generateEntries(0, numEntries, "orig");
    cvs.mutate(additions, Collections.emptyList(), txh);
    // Full-range slice; VERY_END is used because an exclusive end bound would drop COL_END.
    EntryList result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
        makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size(), result.size());
    int windowStart = 494;
    int windowEnd = 2002;
    // Overwrite entries in [windowStart, windowEnd) with updated values.
    List<Entry> updates = generateEntries(windowStart, windowEnd, "updated");
    cvs.mutate(updates, Collections.emptyList(), txh);
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
        makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size(), result.size());
    for (int i = 0; i < result.size(); i++) {
        // FIX: the updated window is [windowStart, windowEnd), inclusive of windowStart.
        // The original condition `windowStart < i` compared the entry at index
        // windowStart against the stale original addition instead of updates.get(0).
        if (windowStart <= i && i < windowEnd) {
            assertEquals(updates.get(i - windowStart), result.get(i));
        } else {
            assertEquals(additions.get(i), result.get(i));
        }
    }
    // Delete the columns of the same window and verify the slice shrinks accordingly.
    List<StaticBuffer> deletions = new ArrayList<>(windowEnd - windowStart);
    deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
    cvs.mutate(Collections.emptyList(), deletions, txh);
    result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
        makeStaticBuffer(VERY_END)), txh);
    assertEquals(additions.size() - deletions.size(), result.size());
}
Use of org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery in the janusgraph project by JanusGraph.
From the class InMemoryColumnValueStoreTest, method testPagingAndFragmentation:
@Test
public void testPagingAndFragmentation() throws TemporaryLockingException {
// Exercises page splitting, fragmentation reporting and quick defragmentation of
// InMemoryColumnValueStore through a sequence of bulk inserts and windowed deletes.
// NOTE(review): the exact page/fragment counts asserted below are tied to the
// current paging implementation and DEF_PAGE_SIZE — confirm against InMemoryColumnValueStore.
int pageSize = InMemoryColumnValueStore.DEF_PAGE_SIZE;
// Mocked transaction that reports itself as transactional.
StoreTransaction txh = mock(StoreTransaction.class);
BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
when(txh.getConfiguration()).thenReturn(mockConfig);
when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);
InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
// Insert 5.5 pages worth of entries in a single mutation.
List<Entry> additions = generateEntries(0, pageSize * 5 + pageSize / 2, "orig");
cvs.mutate(additions, Collections.emptyList(), txh);
// We inserted more than pagesize in one go, so the store should switch to multipage buffer - but, as currently implemented,
// this doesn't get immediately broken up into multiple pages, instead it will hold a single "oversized" page.
// The break-up will only happen if there is a consequent update
assertEquals(1, cvs.numPages(txh));
// emulate update so that the single "oversized" page will be broken up into multiple pages of correct size
cvs.mutate(additions.subList(1, 3), Collections.emptyList(), txh);
assertEquals(6, cvs.numPages(txh));
int numDeleted = 0;
// First deletion window: 2/3 of a page straddling the boundary of pages 0 and 1.
int windowStart = pageSize - pageSize / 3;
int windowEnd = pageSize + pageSize / 3;
// this should remove parts of page0 and page1
List<StaticBuffer> deletions = new ArrayList<>(windowEnd - windowStart);
deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
cvs.mutate(Collections.emptyList(), deletions, txh);
numDeleted += windowEnd - windowStart;
EntryList result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
makeStaticBuffer(VERY_END)), txh);
assertEquals(additions.size() - numDeleted, result.size());
SharedEntryBufferFragmentationReport report = cvs.createFragmentationReport(txh);
assertEquals(6, report.getPageCount());
assertEquals(3, report.getFragmentedPageCount());
// since only 1/3 of each page is removed, the remains won't fit into one page anyway, so not deemed compressible
assertEquals(0, report.getCompressableChunksCount());
assertEquals(0, report.getCompressablePageCount());
// Second deletion window: straddles the boundary of pages 3 and 4.
windowStart = pageSize * 4 - pageSize / 3;
windowEnd = pageSize * 4 + pageSize / 3;
// this should remove parts of page3 and page 4
deletions.clear();
deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
cvs.mutate(Collections.emptyList(), deletions, txh);
numDeleted += windowEnd - windowStart;
result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
makeStaticBuffer(VERY_END)), txh);
assertEquals(additions.size() - numDeleted, result.size());
report = cvs.createFragmentationReport(txh);
assertEquals(6, report.getPageCount());
assertEquals(5, report.getFragmentedPageCount());
// we now have pages 3 & 4 which are 2/3 full, PLUS page 5 which is half full => 3 pages compressable into 2
assertEquals(1, report.getCompressableChunksCount());
assertEquals(3, report.getCompressablePageCount());
assertEquals(1, report.getAchievablePageReduction());
cvs.quickDefragment(txh);
// Defragmentation must not change the visible slice contents, only the page layout.
EntryList result2 = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
makeStaticBuffer(VERY_END)), txh);
assertEquals(additions.size() - numDeleted, result2.size());
for (int i = 0; i < result2.size(); i++) {
assertEquals(result.get(i), result2.get(i));
}
// after quick defrag, we should have 5 pages in total, page 0 & 1 still fragmented, page 4 also not full
report = cvs.createFragmentationReport(txh);
assertEquals(5, report.getPageCount());
assertEquals(3, report.getFragmentedPageCount());
assertEquals(0, report.getCompressableChunksCount());
assertEquals(0, report.getCompressablePageCount());
assertEquals(0, report.getAchievablePageReduction());
// Third deletion window: removes roughly half of page 0 and half of page 1
// (plus one entry), which should make those two pages collapsible into one.
windowStart = pageSize - pageSize / 2;
windowEnd = pageSize + pageSize / 2 + 1;
// this should remove half of page0 and page1 each
deletions.clear();
deletions.addAll(additions.subList(windowStart, windowEnd).stream().map(Entry::getColumn).collect(Collectors.toList()));
cvs.mutate(Collections.emptyList(), deletions, txh);
// Part of this window overlaps columns already deleted by the first window,
// so only the newly removed entries are added to the running count.
numDeleted += (pageSize / 2 - pageSize / 3) * 2 + 1;
result = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
makeStaticBuffer(VERY_END)), txh);
assertEquals(additions.size() - numDeleted, result.size());
// now two first pages should become collapsible into one
report = cvs.createFragmentationReport(txh);
assertEquals(5, report.getPageCount());
assertEquals(3, report.getFragmentedPageCount());
assertEquals(1, report.getCompressableChunksCount());
assertEquals(2, report.getCompressablePageCount());
assertEquals(1, report.getAchievablePageReduction());
cvs.quickDefragment(txh);
// Again, defragmentation must preserve slice contents exactly.
result2 = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START), // if we pass COL_END, it doesn't get included
makeStaticBuffer(VERY_END)), txh);
assertEquals(additions.size() - numDeleted, result2.size());
for (int i = 0; i < result2.size(); i++) {
assertEquals(result.get(i), result2.get(i));
}
// two first pages collapsed into one which is one entry short of full
report = cvs.createFragmentationReport(txh);
assertEquals(4, report.getPageCount());
assertEquals(2, report.getFragmentedPageCount());
assertEquals(0, report.getCompressableChunksCount());
assertEquals(0, report.getCompressablePageCount());
assertEquals(0, report.getAchievablePageReduction());
}
Use of org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery in the janusgraph project by JanusGraph.
From the class InMemoryColumnValueStoreTest, method testMultipageDelete:
@Test
public void testMultipageDelete() throws TemporaryLockingException {
    // Enough entries to make the store span more than one page.
    final int entryCount = 1001;
    // Mocked transaction that reports itself as transactional.
    StoreTransaction txh = mock(StoreTransaction.class);
    BaseTransactionConfig mockConfig = mock(BaseTransactionConfig.class);
    when(txh.getConfiguration()).thenReturn(mockConfig);
    when(mockConfig.getCustomOption(eq(STORAGE_TRANSACTIONAL))).thenReturn(true);

    InMemoryColumnValueStore cvs = new InMemoryColumnValueStore();
    List<Entry> added = generateEntries(0, entryCount, "orig");
    cvs.mutate(added, Collections.emptyList(), txh);

    // Full-range slice; VERY_END is used because an exclusive end bound would drop COL_END.
    EntryList before = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
        makeStaticBuffer(VERY_END)), txh);
    assertEquals(added.size(), before.size());

    // Delete a small window of columns and verify the slice shrinks by that amount.
    final int from = 494;
    final int to = 501;
    List<StaticBuffer> removedColumns = added.subList(from, to).stream()
        .map(Entry::getColumn)
        .collect(Collectors.toList());
    cvs.mutate(Collections.emptyList(), removedColumns, txh);

    EntryList after = cvs.getSlice(new KeySliceQuery(makeStaticBuffer("someRow"), makeStaticBuffer(VERY_START),
        makeStaticBuffer(VERY_END)), txh);
    assertEquals(added.size() - removedColumns.size(), after.size());
}
Use of org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery in the janusgraph project by JanusGraph.
From the class KeyColumnValueStoreTest, method checkSlice:
/**
 * Verifies that a slice query over columns [start, end) of the given key returns
 * exactly the expected non-deleted values, honoring an optional result limit.
 *
 * @param values  expected value matrix indexed as values[key][column]
 * @param removed key/column pairs that were deleted and must not appear in the slice
 * @param key     row key index to query
 * @param start   first column index (inclusive)
 * @param end     last column index (exclusive)
 * @param limit   maximum number of entries to request; a value <= 0 means no limit
 * @throws BackendException if the storage backend fails
 */
public void checkSlice(String[][] values, Set<KeyColumn> removed, int key, int start, int end, int limit) throws BackendException {
    // Use a fresh transaction so the slice observes the latest committed state.
    tx.rollback();
    tx = startTx();
    KeySliceQuery query = new KeySliceQuery(KeyValueStoreUtil.getBuffer(key), KeyValueStoreUtil.getBuffer(start), KeyValueStoreUtil.getBuffer(end));
    if (limit > 0) {
        query.setLimit(limit);
    }
    List<Entry> entries = store.getSlice(query, tx);
    // FIX: the original asserted non-null only AFTER dereferencing entries,
    // so the assertion could never trigger; check immediately instead.
    assertNotNull(entries);
    int pos = 0;
    for (int i = start; i < end; i++) {
        if (removed.contains(new KeyColumn(key, i))) {
            log.debug("Skipping deleted ({},{})", key, i);
            continue;
        }
        // Only entries within the limit are returned; columns past it are
        // still counted in pos to validate the total below.
        if (limit <= 0 || pos < limit) {
            log.debug("Checking k={}[c_start={},c_end={}](limit={}): column index={}/pos={}", key, start, end, limit, i, pos);
            assertTrue(entries.size() > pos);
            Entry entry = entries.get(pos);
            int col = KeyValueStoreUtil.getID(entry.getColumn());
            String str = KeyValueStoreUtil.getString(entry.getValueAs(StaticBuffer.STATIC_FACTORY));
            assertEquals(i, col);
            assertEquals(values[key][i], str);
        }
        pos++;
    }
    // The store must return either all surviving columns or exactly `limit` of them.
    if (limit > 0 && pos > limit) {
        assertEquals(limit, entries.size());
    } else {
        assertEquals(pos, entries.size());
    }
}
Aggregations