Use of org.janusgraph.diskstorage.Entry in the janusgraph project by JanusGraph.
Class KCVSLog, method writeSetting.
private void writeSetting(String identifier, final StaticBuffer column, long value) {
    final StaticBuffer key = getSettingKey(identifier);
    // Encode the long value and pair it with the column to form a single Entry.
    final Entry add = StaticArrayEntry.of(column, BufferUtil.getLongBuffer(value));
    // Persist the entry in its own backend transaction, bounded by maxWriteTime; no columns are deleted.
    Boolean status = BackendOperation.execute(new BackendOperation.Transactional<Boolean>() {

        @Override
        public Boolean call(StoreTransaction txh) throws BackendException {
            store.mutate(key, Collections.singletonList(add), KeyColumnValueStore.NO_DELETIONS, txh);
            return Boolean.TRUE;
        }

        @Override
        public String toString() {
            return "writingLogSetting";
        }
    }, this, times, maxWriteTime);
    Preconditions.checkState(status);
}
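For orientation, here is a minimal sketch of the column/value pairing that writeSetting relies on: the setting value is encoded as an 8-byte buffer and paired with the column buffer to form a single Entry. This is not taken from the JanusGraph sources; the column value 1 and the stored value 42L are hypothetical, and the usual imports from org.janusgraph.diskstorage and org.janusgraph.diskstorage.util are assumed.

StaticBuffer column = BufferUtil.getIntBuffer(1);                  // hypothetical setting column
Entry settingEntry = StaticArrayEntry.of(column, BufferUtil.getLongBuffer(42L));
long decoded = settingEntry.getValue().getLong(0);                 // reads the stored 42L back out of the value buffer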
Use of org.janusgraph.diskstorage.Entry in the janusgraph project by JanusGraph.
Class StaticArrayEntryTest, method testEntryListWithMetaSchema.
/**
* Copied from above - the only difference is using schema instances and checking the schema
*/
@Test
public void testEntryListWithMetaSchema() {
    final Map<Integer, Long> entries = generateRandomEntries();
    EntryList[] el = generateEntryListArray(entries, "SCHEMA_INSTANCE");
    for (final EntryList anEl : el) {
        // System.out.println("Iteration: " + i);
        assertEquals(entries.size(), anEl.size());
        int num = 0;
        for (final Entry e : anEl) {
            checkEntry(e, entries);
            assertTrue(e.hasMetaData());
            assertFalse(e.getMetaData().isEmpty());
            assertEquals(metaData, e.getMetaData());
            assertNull(e.getCache());
            e.setCache(cache);
            num++;
        }
        assertEquals(entries.size(), num);
        final Iterator<Entry> iter = anEl.reuseIterator();
        num = 0;
        while (iter.hasNext()) {
            final Entry e = iter.next();
            assertTrue(e.hasMetaData());
            assertFalse(e.getMetaData().isEmpty());
            assertEquals(metaData, e.getMetaData());
            assertEquals(cache, e.getCache());
            checkEntry(e, entries);
            num++;
        }
        assertEquals(entries.size(), num);
    }
}
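As a rough illustration only (the specific keys and values below are hypothetical, not the test class's actual metaData field), the map compared by the assertions above associates EntryMetaData keys with values; java.util.EnumMap and org.janusgraph.diskstorage.EntryMetaData imports are assumed.

Map<EntryMetaData, Object> expectedMeta = new EnumMap<>(EntryMetaData.class);
expectedMeta.put(EntryMetaData.TIMESTAMP, 100L);   // hypothetical write timestamp
expectedMeta.put(EntryMetaData.TTL, 5);            // hypothetical time-to-live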
Use of org.janusgraph.diskstorage.Entry in the janusgraph project by JanusGraph.
Class StaticArrayEntryTest, method testEntryList.
@Test
public void testEntryList() {
    final Map<Integer, Long> entries = generateRandomEntries();
    EntryList[] el = generateEntryListArray(entries, "INSTANCE");
    for (final EntryList anEl : el) {
        assertEquals(entries.size(), anEl.size());
        int num = 0;
        for (final Entry e : anEl) {
            checkEntry(e, entries);
            assertFalse(e.hasMetaData());
            assertTrue(e.getMetaData().isEmpty());
            assertNull(e.getCache());
            e.setCache(cache);
            num++;
        }
        assertEquals(entries.size(), num);
        final Iterator<Entry> iterator = anEl.reuseIterator();
        num = 0;
        while (iterator.hasNext()) {
            final Entry e = iterator.next();
            checkEntry(e, entries);
            assertFalse(e.hasMetaData());
            assertTrue(e.getMetaData().isEmpty());
            assertEquals(cache, e.getCache());
            num++;
        }
        assertEquals(entries.size(), num);
    }
}
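A minimal sketch of building an EntryList by hand, using the StaticArrayEntryList factory that also appears in the lock-cleaner test further below; the keys and values here are hypothetical and the test helpers generateRandomEntries/generateEntryListArray are not reproduced.

Entry a = StaticArrayEntry.of(BufferUtil.getIntBuffer(1), BufferUtil.getLongBuffer(10L));
Entry b = StaticArrayEntry.of(BufferUtil.getIntBuffer(2), BufferUtil.getLongBuffer(20L));
EntryList list = StaticArrayEntryList.of(a, b);
for (Entry e : list) {
    // first pass: iterate the list once, e.g. to inspect or cache each entry
}
Iterator<Entry> again = list.reuseIterator();      // obtain a second iterator over the same list, as the tests above do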
Use of org.janusgraph.diskstorage.Entry in the janusgraph project by JanusGraph.
Class ConsistentKeyLockerTest, method recordSuccessfulLockWrite.
private LockInfo recordSuccessfulLockWrite(StoreTransaction tx, long duration, TemporalUnit tu, StaticBuffer del) throws BackendException {
    currentTimeNS = currentTimeNS.plusNanos(1);
    expect(times.getTime()).andReturn(currentTimeNS);
    final Instant lockNS = currentTimeNS;
    StaticBuffer lockCol = codec.toLockCol(lockNS, defaultLockRid, times);
    Entry add = StaticArrayEntry.of(lockCol, defaultLockVal);
    StaticBuffer k = eq(defaultLockKey);
    final List<Entry> adds = eq(Collections.singletonList(add));
    final List<StaticBuffer> deletions;
    if (null != del) {
        deletions = eq(Collections.singletonList(del));
    } else {
        deletions = eq(ImmutableList.of());
    }
    // Record the expected mutation: one lock column is added and, if a prior column was supplied, it is deleted.
    store.mutate(k, adds, deletions, eq(tx));
    expectLastCall().once();
    currentTimeNS = currentTimeNS.plus(duration, tu);
    expect(times.getTime()).andReturn(currentTimeNS);
    ConsistentKeyLockStatus status = new ConsistentKeyLockStatus(lockNS, lockNS.plus(defaultExpireNS));
    return new LockInfo(lockNS, status, lockCol);
}
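The expiry computation at the end is plain java.time arithmetic; a hedged sketch with hypothetical values (assuming, as Instant.plus requires, that defaultExpireNS is a TemporalAmount such as a Duration):

Instant writeInstant = Instant.ofEpochSecond(0).plusNanos(1);   // stands in for lockNS
Duration expire = Duration.ofMillis(500);                       // hypothetical stand-in for defaultExpireNS
Instant expiresAt = writeInstant.plus(expire);                  // 1 ns past the epoch plus 500 ms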
Use of org.janusgraph.diskstorage.Entry in the janusgraph project by JanusGraph.
Class LockCleanerRunnableTest, method testPreservesLocksAtOrAfterCutoff.
/**
* Locks with timestamps equal to or numerically greater than the cleaner
* cutoff timestamp must be preserved. Test that the cleaner reads locks by
* slicing the store and then does <b>not</b> attempt to write.
*/
@Test
public void testPreservesLocksAtOrAfterCutoff() throws BackendException {
    final Instant cutoff = Instant.ofEpochMilli(10L);
    Entry currentLock = StaticArrayEntry.of(codec.toLockCol(cutoff, defaultLockRid, TimestampProviders.MILLI), BufferUtil.getIntBuffer(0));
    Entry futureLock = StaticArrayEntry.of(codec.toLockCol(cutoff.plusMillis(1), defaultLockRid, TimestampProviders.MILLI), BufferUtil.getIntBuffer(0));
    EntryList locks = StaticArrayEntryList.of(currentLock, futureLock);
    // Don't increment the cutoff: currentLock's column sits exactly at the cutoff timestamp.
    del = new StandardLockCleanerRunnable(store, kc, tx, codec, cutoff, TimestampProviders.MILLI);
    expect(store.getSlice(eq(ksq), eq(tx))).andReturn(locks);
    ctrl.replay();
    del.run();
}
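The cutoff rule this test pins down can be stated as a single comparison; the sketch below uses the same timestamps as above, but the isBefore check is an illustration of the rule rather than the cleaner's actual code path.

Instant cutoff = Instant.ofEpochMilli(10L);
Instant currentLockTime = Instant.ofEpochMilli(10L);        // exactly at the cutoff
Instant futureLockTime = Instant.ofEpochMilli(11L);         // after the cutoff
boolean deleteCurrent = currentLockTime.isBefore(cutoff);   // false: the lock must be preserved
boolean deleteFuture = futureLockTime.isBefore(cutoff);     // false: the lock must be preserved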