Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.
The class CassandraBinaryRecordReader, method completeNextKV.
private KV completeNextKV() throws IOException {
    KV completedKV = null;
    boolean hasNext;
    do {
        hasNext = reader.nextKeyValue();
        if (!hasNext) {
            completedKV = incompleteKV;
            incompleteKV = null;
        } else {
            StaticArrayBuffer key = StaticArrayBuffer.of(reader.getCurrentKey());
            SortedMap<ByteBuffer, Cell> valueSortedMap = reader.getCurrentValue();
            List<Entry> entries = new ArrayList<>(valueSortedMap.size());
            for (Map.Entry<ByteBuffer, Cell> ent : valueSortedMap.entrySet()) {
                ByteBuffer col = ent.getKey();
                ByteBuffer val = ent.getValue().value();
                entries.add(StaticArrayEntry.of(StaticArrayBuffer.of(col), StaticArrayBuffer.of(val)));
            }
            if (null == incompleteKV) {
                // Initialization; this should happen just once in an instance's lifetime
                incompleteKV = new KV(key);
            } else if (!incompleteKV.key.equals(key)) {
                // The underlying Cassandra reader has just changed to a key we haven't seen yet.
                // This implies that there will be no more entries for the prior key.
                completedKV = incompleteKV;
                incompleteKV = new KV(key);
            }
            incompleteKV.addEntries(entries);
        }
        /* Loop ends when either
         * A) the Cassandra reader ran out of data
         * or
         * B) the Cassandra reader switched keys, thereby completing a KV */
    } while (hasNext && null == completedKV);
    return completedKV;
}
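For context, completeNextKV accumulates columns into a per-row KV holder. A minimal sketch of that holder follows; the key field and addEntries method mirror the snippet above, while the rest (visibility, lazy allocation, the java.util imports already present in the reader) is assumed rather than taken from the source.

private static class KV {
    // Row key this holder accumulates entries for
    private final StaticArrayBuffer key;
    private ArrayList<Entry> entries;

    public KV(StaticArrayBuffer key) {
        this.key = key;
    }

    public void addEntries(Collection<Entry> toAdd) {
        // Lazily allocate so a key with no accumulated entries costs nothing
        if (null == entries)
            entries = new ArrayList<>(toAdd.size());
        entries.addAll(toAdd);
    }
}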
Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.
The class HBaseStoreManagerMutationTest, method testKCVMutationToPuts.
@Test
public void testKCVMutationToPuts() throws Exception {
    final Map<String, Map<StaticBuffer, KCVMutation>> storeMutationMap = new HashMap<>();
    final Map<StaticBuffer, KCVMutation> rowkeyMutationMap = new HashMap<>();
    final List<Long> expectedColumnsWithTTL = new ArrayList<>();
    final List<Long> expectedColumnsWithoutTTL = new ArrayList<>();
    final List<Long> expectedColumnDelete = new ArrayList<>();
    StaticArrayEntry e = null;
    StaticBuffer rowkey, col, val;
    // 2 rows
    for (int row = 0; row < 2; row++) {
        rowkey = KeyColumnValueStoreUtil.longToByteBuffer(row);
        List<Entry> additions = new ArrayList<>();
        List<StaticBuffer> deletions = new ArrayList<>();
        // 100 columns each row
        int i;
        for (i = 0; i < 100; i++) {
            col = KeyColumnValueStoreUtil.longToByteBuffer(i);
            val = KeyColumnValueStoreUtil.longToByteBuffer(i + 100);
            e = (StaticArrayEntry) StaticArrayEntry.of(col, val);
            // Set half of the columns with TTL, also vary the TTL values
            if (i % 2 == 0) {
                e.setMetaData(EntryMetaData.TTL, i % 10 + 1);
                // Collect the columns with TTL. Only do this for one row
                if (row == 1) {
                    expectedColumnsWithTTL.add((long) i);
                }
                additions.add(e);
            } else {
                // Collect the columns without TTL. Only do this for one row
                if (row == 1) {
                    expectedColumnsWithoutTTL.add((long) i);
                }
                additions.add(e);
            }
        }
        // Add one deletion to the row
        if (row == 1) {
            expectedColumnDelete.add((long) (i - 1));
        }
        deletions.add(e);
        rowkeyMutationMap.put(rowkey, new KCVMutation(additions, deletions));
    }
    storeMutationMap.put("store1", rowkeyMutationMap);
    HBaseStoreManager manager = new HBaseStoreManager(HBaseStorageSetup.getHBaseConfiguration());
    final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerRowKey = manager.convertToCommands(storeMutationMap, 0, 0);
    // 2 rows
    Assert.assertEquals(2, commandsPerRowKey.size());
    // Verify puts
    final List<Long> putColumnsWithTTL = new ArrayList<>();
    final List<Long> putColumnsWithoutTTL = new ArrayList<>();
    Pair<List<Put>, Delete> commands = commandsPerRowKey.values().iterator().next();
    long colName;
    for (Put p : commands.getFirst()) {
        // In Put, Long.MAX_VALUE means no TTL
        for (Map.Entry<byte[], List<Cell>> me : p.getFamilyCellMap().entrySet()) {
            for (Cell c : me.getValue()) {
                colName = KeyColumnValueStoreUtil.bufferToLong(new StaticArrayBuffer(CellUtil.cloneQualifier(c)));
                if (p.getTTL() < Long.MAX_VALUE) {
                    putColumnsWithTTL.add(colName);
                } else {
                    putColumnsWithoutTTL.add(colName);
                }
            }
        }
    }
    Collections.sort(putColumnsWithoutTTL);
    Collections.sort(putColumnsWithTTL);
    Assert.assertArrayEquals(expectedColumnsWithoutTTL.toArray(), putColumnsWithoutTTL.toArray());
    Assert.assertArrayEquals(expectedColumnsWithTTL.toArray(), putColumnsWithTTL.toArray());
    // Verify deletes
    final List<Long> deleteColumns = new ArrayList<>();
    Delete d = commands.getSecond();
    for (Map.Entry<byte[], List<Cell>> me : d.getFamilyCellMap().entrySet()) {
        for (Cell c : me.getValue()) {
            colName = KeyColumnValueStoreUtil.bufferToLong(new StaticArrayBuffer(CellUtil.cloneQualifier(c)));
            deleteColumns.add(colName);
        }
    }
    Collections.sort(deleteColumns);
    Assert.assertArrayEquals(expectedColumnDelete.toArray(), deleteColumns.toArray());
}
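For reference, the TTL handoff these assertions depend on can be sketched as below. Put#addColumn and Mutation#setTTL are standard HBase client API, but the helper name, its parameters, and the seconds-to-milliseconds conversion are assumptions for illustration, not the actual convertToCommands implementation.

import org.apache.hadoop.hbase.client.Put;

public class PutTtlSketch {

    // Hypothetical helper: build a Put and apply an entry-level TTL if one was set.
    public static Put toPut(byte[] rowkey, byte[] cf, byte[] qualifier, byte[] value, long ttlSeconds) {
        Put put = new Put(rowkey);
        put.addColumn(cf, qualifier, value);
        if (ttlSeconds > 0) {
            // Mutation#setTTL takes milliseconds; a Put without an explicit TTL
            // keeps the default Long.MAX_VALUE, which the test reads as "no TTL".
            put.setTTL(ttlSeconds * 1000L);
        }
        return put;
    }
}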
Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.
The class JanusGraphVertexDeserializer, method readHadoopVertex.
// Read a single row from the edgestore and create a TinkerVertex corresponding to the row.
// The neighboring vertices are represented by DetachedVertex instances.
public TinkerVertex readHadoopVertex(final StaticBuffer key, Iterable<Entry> entries) {
    // Convert the row key to a vertex ID
    final long vertexId = idManager.getKeyID(key);
    Preconditions.checkArgument(vertexId > 0);
    // Partitioned vertex handling
    if (idManager.isPartitionedVertex(vertexId)) {
        Preconditions.checkState(setup.getFilterPartitionedVertices(), "Read partitioned vertex (ID=%s), but partitioned vertex filtering is disabled.", vertexId);
        log.debug("Skipping partitioned vertex with ID {}", vertexId);
        return null;
    }
    // Create the TinkerVertex
    TinkerGraph tg = TinkerGraph.open();
    TinkerVertex tv = null;
    // Iterate over edgestore columns to find the vertex's label relation
    for (final Entry data : entries) {
        RelationReader relationReader = setup.getRelationReader(vertexId);
        final RelationCache relation = relationReader.parseRelation(data, false, typeManager);
        if (systemTypes.isVertexLabelSystemType(relation.typeId)) {
            // Found the vertex label
            long vertexLabelId = relation.getOtherVertexId();
            VertexLabel vl = typeManager.getExistingVertexLabel(vertexLabelId);
            // Create the TinkerVertex with this label
            tv = getOrCreateVertex(vertexId, vl.name(), tg);
        }
    }
    // Fallback (added after testing): if no vertex label relation was found,
    // create the vertex without an explicit label
    if (null == tv) {
        tv = getOrCreateVertex(vertexId, null, tg);
    }
    Preconditions.checkState(null != tv, "Unable to determine vertex label for vertex with ID %s", vertexId);
    // Iterate over and decode edgestore columns (relations) on this vertex
    for (final Entry data : entries) {
        try {
            RelationReader relationReader = setup.getRelationReader(vertexId);
            final RelationCache relation = relationReader.parseRelation(data, false, typeManager);
            // Ignore system types
            if (systemTypes.isSystemType(relation.typeId))
                continue;
            final RelationType type = typeManager.getExistingRelationType(relation.typeId);
            // Ignore hidden types
            if (((InternalRelationType) type).isInvisibleType())
                continue;
            // Decode and create the relation (edge or property)
            if (type.isPropertyKey()) {
                // Decode property
                Object value = relation.getValue();
                Preconditions.checkNotNull(value);
                VertexProperty.Cardinality card = getPropertyKeyCardinality(type.name());
                tv.property(card, type.name(), value, T.id, relation.relationId);
            } else {
                assert type.isEdgeLabel();
                // Partitioned vertex handling
                if (idManager.isPartitionedVertex(relation.getOtherVertexId())) {
                    Preconditions.checkState(setup.getFilterPartitionedVertices(), "Read edge incident on a partitioned vertex, but partitioned vertex filtering is disabled. " + "Relation ID: %s. This vertex ID: %s. Other vertex ID: %s. Edge label: %s.", relation.relationId, vertexId, relation.getOtherVertexId(), type.name());
                    log.debug("Skipping edge with ID {} incident on partitioned vertex with ID {} (and nonpartitioned vertex with ID {})", relation.relationId, relation.getOtherVertexId(), vertexId);
                    continue;
                }
                // Decode edge
                TinkerEdge te;
                // We don't know the label of the other vertex, but one must be provided
                TinkerVertex adjacentVertex = getOrCreateVertex(relation.getOtherVertexId(), null, tg);
                // Handle self-loop edges
                if (tv.equals(adjacentVertex) && isLoopAdded(tv, type.name())) {
                    continue;
                }
                if (relation.direction.equals(Direction.IN)) {
                    te = (TinkerEdge) adjacentVertex.addEdge(type.name(), tv, T.id, relation.relationId);
                } else if (relation.direction.equals(Direction.OUT)) {
                    te = (TinkerEdge) tv.addEdge(type.name(), adjacentVertex, T.id, relation.relationId);
                } else {
                    throw new RuntimeException("Direction.BOTH is not supported");
                }
                if (relation.hasProperties()) {
                    // Load relation properties
                    for (final LongObjectCursor<Object> next : relation) {
                        assert next.value != null;
                        RelationType rt = typeManager.getExistingRelationType(next.key);
                        if (rt.isPropertyKey()) {
                            te.property(rt.name(), next.value);
                        } else {
                            throw new RuntimeException("Metaedges are not supported");
                        }
                    }
                }
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    /* Since we are filtering out system relation types, we might end up with vertices that
     * have no incident relations. This is especially true for schema vertices; those are
     * filtered out here. */
    if (!tv.edges(Direction.BOTH).hasNext() && !tv.properties().hasNext()) {
        log.trace("Vertex {} has no relations", vertexId);
        return null;
    }
    return tv;
}
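The snippet leans on a getOrCreateVertex helper that is not shown. One plausible shape over the TinkerGraph API is sketched below; lookup-by-id via tg.vertices and addVertex with T.id/T.label are standard TinkerPop calls, but this exact signature and body are assumptions. It assumes java.util.Iterator and the TinkerPop Vertex type alongside the imports already used above.

private static TinkerVertex getOrCreateVertex(final long vertexId, final String label, final TinkerGraph tg) {
    final Iterator<Vertex> it = tg.vertices(vertexId);
    if (it.hasNext()) {
        // The vertex was already created while decoding an earlier relation
        return (TinkerVertex) it.next();
    }
    return (TinkerVertex) (null == label
            ? tg.addVertex(T.id, vertexId)
            : tg.addVertex(T.id, vertexId, T.label, label));
}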
Use of org.janusgraph.diskstorage.Entry in project janusgraph by JanusGraph.
The class LockCleanerRunnableTest, method testPreservesLocksAtOrAfterCutoff.
/**
 * Locks with timestamps equal to or numerically greater than the cleaner
 * cutoff timestamp must be preserved. Test that the cleaner reads locks by
 * slicing the store and then does <b>not</b> attempt to write.
 */
@Test
public void testPreservesLocksAtOrAfterCutoff() throws BackendException {
    final Instant cutoff = Instant.ofEpochMilli(10L);
    Entry currentLock = StaticArrayEntry.of(codec.toLockCol(cutoff, defaultLockRid, TimestampProviders.MILLI), BufferUtil.getIntBuffer(0));
    Entry futureLock = StaticArrayEntry.of(codec.toLockCol(cutoff.plusMillis(1), defaultLockRid, TimestampProviders.MILLI), BufferUtil.getIntBuffer(0));
    EntryList locks = StaticArrayEntryList.of(currentLock, futureLock);
    // Don't increment cutoff: lockCol is exactly at the cutoff timestamp
    del = new StandardLockCleanerRunnable(store, kc, tx, codec, cutoff, TimestampProviders.MILLI);
    expect(store.getSlice(eq(ksq), eq(tx))).andReturn(locks);
    ctrl.replay();
    del.run();
}
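In other words, the behavior under test reduces to a strict older-than-cutoff check: a lock column is only eligible for cleanup when its timestamp precedes the cutoff instant, so both locks above must survive. A minimal sketch of that rule, with a hypothetical class and method name rather than actual StandardLockCleanerRunnable API:

import java.time.Instant;

final class LockCutoffRule {

    // Locks strictly older than the cutoff may be deleted; locks at or after it are preserved.
    static boolean isEligibleForCleanup(Instant lockTimestamp, Instant cutoff) {
        return lockTimestamp.isBefore(cutoff);
    }
}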