Use of org.janusgraph.core.JanusGraphVertex in project janusgraph by JanusGraph.
In class AbstractIndexManagementIT, method testRepairGraphIndex:
@Test
public void testRepairGraphIndex() throws InterruptedException, BackendException, ExecutionException {
    tx.commit();
    mgmt.commit();
    // Load the "Graph of the Gods" sample data (WITHOUT mixed index coverage)
    GraphOfTheGodsFactory.loadWithoutMixedIndex(graph, true);
    // Create and enable a graph index on age
    JanusGraphManagement m = graph.openManagement();
    PropertyKey age = m.getPropertyKey("age");
    m.buildIndex("verticesByAge", Vertex.class).addKey(age).buildCompositeIndex();
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to REGISTERED
    assertTrue(ManagementSystem.awaitGraphIndexStatus(graph, "verticesByAge").status(SchemaStatus.REGISTERED).call().getSucceeded());
    m = graph.openManagement();
    JanusGraphIndex index = m.getGraphIndex("verticesByAge");
    m.updateIndex(index, SchemaAction.ENABLE_INDEX);
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to ENABLED
    assertTrue(ManagementSystem.awaitGraphIndexStatus(graph, "verticesByAge").status(SchemaStatus.ENABLED).call().getSucceeded());
    // Run a query that hits the index but erroneously returns nothing because we haven't repaired yet
    assertFalse(graph.query().has("age", 10000).vertices().iterator().hasNext());
    // Repair
    MapReduceIndexManagement mri = new MapReduceIndexManagement(graph);
    m = graph.openManagement();
    index = m.getGraphIndex("verticesByAge");
    ScanMetrics metrics = mri.updateIndex(index, SchemaAction.REINDEX).get();
    assertEquals(6, metrics.getCustom(IndexRepairJob.ADDED_RECORDS_COUNT));
    // Test the index
    Iterable<JanusGraphVertex> hits = graph.query().has("age", 4500).vertices();
    assertNotNull(hits);
    assertEquals(1, Iterables.size(hits));
    JanusGraphVertex v = Iterables.getOnlyElement(hits);
    assertNotNull(v);
    assertEquals("neptune", v.value("name"));
}
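
For graphs small enough to reindex on one machine, the same repair can run inside the management system instead of going through MapReduce. A minimal sketch of that alternative, assuming the same graph and index name as the test above:

    // Minimal sketch: ManagementSystem runs the reindex job itself (no Hadoop cluster)
    JanusGraphManagement m2 = graph.openManagement();
    m2.updateIndex(m2.getGraphIndex("verticesByAge"), SchemaAction.REINDEX).get();
    m2.commit();
    graph.tx().commit();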
Use of org.janusgraph.core.JanusGraphVertex in project janusgraph by JanusGraph.
In class AbstractInputFormatIT, method testReadSelfEdge:
@Test
public void testReadSelfEdge() throws Exception {
    GraphOfTheGodsFactory.load(graph, null, true);
    assertEquals(12L, (long) graph.traversal().V().count().next());
    // Add a self-loop on sky with edge label "lives"; it's nonsense, but at least it needs no schema changes
    JanusGraphVertex sky = graph.query().has("name", "sky").vertices().iterator().next();
    assertNotNull(sky);
    assertEquals("sky", sky.value("name"));
    assertEquals(1L, sky.query().direction(Direction.IN).edgeCount());
    assertEquals(0L, sky.query().direction(Direction.OUT).edgeCount());
    assertEquals(1L, sky.query().direction(Direction.BOTH).edgeCount());
    sky.addEdge("lives", sky, "reason", "testReadSelfEdge");
    assertEquals(2L, sky.query().direction(Direction.IN).edgeCount());
    assertEquals(1L, sky.query().direction(Direction.OUT).edgeCount());
    assertEquals(3L, sky.query().direction(Direction.BOTH).edgeCount());
    graph.tx().commit();
    // Read the new edge using the inputformat
    Graph g = getGraph();
    GraphTraversalSource t = g.traversal().withComputer(SparkGraphComputer.class);
    Iterator<Object> edgeIdIterator = t.V().has("name", "sky").bothE().id();
    assertNotNull(edgeIdIterator);
    assertTrue(edgeIdIterator.hasNext());
    Set<Object> edges = Sets.newHashSet(edgeIdIterator);
    assertEquals(2, edges.size());
}
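
The degree arithmetic above hinges on how JanusGraph counts a self-loop: once for IN, once for OUT, and therefore twice for BOTH, while the Gremlin bothE() traversal deduplicates by edge id, which is why the set ends up with only 2 ids. A minimal sketch isolating that behavior, assuming the in-memory backend:

    // Minimal sketch: a self-loop is counted once per direction, twice for BOTH
    JanusGraph g2 = JanusGraphFactory.build().set("storage.backend", "inmemory").open();
    JanusGraphVertex loop = g2.addVertex();
    loop.addEdge("self", loop);
    assertEquals(1L, loop.query().direction(Direction.IN).edgeCount());
    assertEquals(1L, loop.query().direction(Direction.OUT).edgeCount());
    assertEquals(2L, loop.query().direction(Direction.BOTH).edgeCount());
    g2.close();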
Use of org.janusgraph.core.JanusGraphVertex in project janusgraph by JanusGraph.
In class CassandraScanJobIT, method testPartitionedVertexScan:
@Test
public void testPartitionedVertexScan() throws Exception {
    tearDown();
    clearGraph(getConfiguration());
    WriteConfiguration partConf = getConfiguration();
    open(partConf);
    mgmt.makeVertexLabel("part").partition().make();
    finishSchema();
    JanusGraphVertex supernode = graph.addVertex("part");
    for (int i = 0; i < 128; i++) {
        JanusGraphVertex v = graph.addVertex("part");
        v.addEdge("default", supernode);
        if (0 < i && 0 == i % 4) {
            graph.tx().commit();
        }
    }
    graph.tx().commit();
    org.apache.hadoop.conf.Configuration c = new org.apache.hadoop.conf.Configuration();
    c.set(ConfigElement.getPath(JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS, true) + "." + "storage.cassandra.keyspace", getClass().getSimpleName());
    c.set(ConfigElement.getPath(JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS, true) + "." + "storage.backend", "cassandrathrift");
    c.set("cassandra.input.partitioner.class", "org.apache.cassandra.dht.Murmur3Partitioner");
    Job job = getVertexJobWithDefaultMapper(c);
    // Should throw an exception since filter-partitioned-vertices wasn't enabled
    assertFalse(job.waitForCompletion(true));
}
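
The expected failure comes from scanning a partitioned supernode without telling the input format to skip it. A hedged sketch of the passing counterpart, assuming janusgraph-hadoop exposes the option as JanusGraphHadoopConfiguration.FILTER_PARTITIONED_VERTICES:

    // Assumption: FILTER_PARTITIONED_VERTICES is the option that skips partitioned vertices;
    // with filtering enabled the scan should complete instead of failing
    c.setBoolean(ConfigElement.getPath(JanusGraphHadoopConfiguration.FILTER_PARTITIONED_VERTICES), true);
    Job filteredJob = getVertexJobWithDefaultMapper(c);
    assertTrue(filteredJob.waitForCompletion(true));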
Use of org.janusgraph.core.JanusGraphVertex in project janusgraph by JanusGraph.
In class ManagementSystem, method getRelationIndex:
@Override
public RelationTypeIndex getRelationIndex(RelationType type, String name) {
    Preconditions.checkArgument(type != null);
    Preconditions.checkArgument(StringUtils.isNotBlank(name));
    String composedName = composeRelationTypeIndexName(type, name);
    // Skip the SchemaCache: it keeps the code compact, and we don't need the extra performance here
    JanusGraphVertex v = Iterables.getOnlyElement(QueryUtil.getVertices(transaction, BaseKey.SchemaName, JanusGraphSchemaCategory.getRelationTypeName(composedName)), null);
    if (v == null)
        return null;
    assert v instanceof InternalRelationType;
    return new RelationTypeIndexWrapper((InternalRelationType) v);
}
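
On the caller's side, this lookup pairs with buildEdgeIndex: the composed name searched here is the one the builder registers. A minimal sketch using hypothetical label and key names ("battled", "time"):

    // Minimal sketch: build a vertex-centric index, then retrieve it by (type, name)
    JanusGraphManagement m = graph.openManagement();
    EdgeLabel battled = m.getEdgeLabel("battled");
    PropertyKey time = m.getPropertyKey("time");
    m.buildEdgeIndex(battled, "battlesByTime", Direction.BOTH, time);
    RelationTypeIndex idx = m.getRelationIndex(battled, "battlesByTime");
    m.commit();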
Use of org.janusgraph.core.JanusGraphVertex in project janusgraph by JanusGraph.
In class VertexJobConverter, method process:
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    long vertexId = getVertexId(key);
    assert entries.get(VERTEX_EXISTS_QUERY) != null;
    // Skip ghost vertices (deleted vertices with remnant data) and count them
    if (isGhostVertex(vertexId, entries.get(VERTEX_EXISTS_QUERY))) {
        metrics.incrementCustom(GHOST_VERTEX_COUNT);
        return;
    }
    JanusGraphVertex vertex = tx.getInternalVertex(vertexId);
    Preconditions.checkArgument(vertex instanceof PreloadedVertex, "The bounding transaction is not configured correctly");
    PreloadedVertex v = (PreloadedVertex) vertex;
    v.setAccessCheck(PreloadedVertex.OPENSTAR_CHECK);
    // Preload the scanned entries into the vertex's query cache so the wrapped
    // job can run vertex-centric queries without touching storage
    for (Map.Entry<SliceQuery, EntryList> entry : entries.entrySet()) {
        SliceQuery sq = entry.getKey();
        if (sq.equals(VERTEX_EXISTS_QUERY))
            continue;
        EntryList entryList = entry.getValue();
        // A result list at the slice limit was probably truncated; record that
        if (entryList.size() >= sq.getLimit())
            metrics.incrementCustom(TRUNCATED_ENTRY_LISTS);
        v.addToQueryCache(sq.updateLimit(Query.NO_LIMIT), entryList);
    }
    job.process(v, metrics);
}
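
This converter lets a job be written purely against JanusGraphVertex while the scan framework deals in raw key/entry slices. A hedged sketch of a VertexScanJob it could drive (class and metric names are hypothetical):

    // Hedged sketch of a VertexScanJob; the converter preloads the query cache,
    // so vertex.query() below reads only the slices that getQueries requested
    public class DegreeCountJob implements VertexScanJob {
        // Hypothetical custom counter name
        public static final String DEGREE_COUNT = "degree-total";

        @Override
        public void process(JanusGraphVertex vertex, ScanMetrics metrics) {
            metrics.incrementCustom(DEGREE_COUNT, Iterables.size(vertex.query().direction(Direction.BOTH).edges()));
        }

        @Override
        public void getQueries(QueryContainer queries) {
            // Declare up front which slices process() will need
            queries.addQuery().direction(Direction.BOTH).edges();
        }

        @Override
        public DegreeCountJob clone() {
            // Stateless, so sharing one instance across workers is safe
            return this;
        }
    }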