use of org.janusgraph.core.RelationType in project janusgraph by JanusGraph.
In the class IndexSelectionUtil, the method getKeyIndexesForCondition:
private static Iterable<IndexType> getKeyIndexesForCondition(Condition<JanusGraphElement> condition) {
    if (condition instanceof PredicateCondition) {
        final RelationType type = ((PredicateCondition<RelationType, JanusGraphElement>) condition).getKey();
        Preconditions.checkArgument(type != null && type.isPropertyKey());
        return ((InternalRelationType) type).getKeyIndexes();
    }
    return Collections.emptySet();
}
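For context, a minimal sketch of how such a helper is typically consumed during index selection: walking the children of a composite condition and collecting the candidate indexes of each property-key predicate. The surrounding loop and the candidates set are illustrative assumptions, not code from the class above.

// Illustrative sketch only: gather candidate indexes for every
// PredicateCondition nested under a composite condition.
Set<IndexType> candidates = new HashSet<>();
for (Condition<JanusGraphElement> child : condition.getChildren()) {
    for (IndexType indexType : getKeyIndexesForCondition(child)) {
        candidates.add(indexType);
    }
}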
use of org.janusgraph.core.RelationType in project janusgraph by JanusGraph.
In the class MapReduceIndexManagement, the method updateIndex:
/**
* Updates the provided index according to the given {@link SchemaAction}.
* Only {@link SchemaAction#REINDEX} and {@link SchemaAction#REMOVE_INDEX} are supported.
*
* @param index the index to process
* @param updateAction either {@code REINDEX} or {@code REMOVE_INDEX}
* @return a future that returns immediately;
* this method blocks until the Hadoop MapReduce job completes
*/
// TODO make this future actually async and update javadoc @return accordingly
public JanusGraphManagement.IndexJobFuture updateIndex(Index index, SchemaAction updateAction) throws BackendException {
    Preconditions.checkNotNull(index, "Index parameter must not be null");
    Preconditions.checkNotNull(updateAction, "%s parameter must not be null", SchemaAction.class.getSimpleName());
    Preconditions.checkArgument(SUPPORTED_ACTIONS.contains(updateAction), "Only these %s parameters are supported: %s (was given %s)", SchemaAction.class.getSimpleName(), SUPPORTED_ACTIONS_STRING, updateAction);
    Preconditions.checkArgument(RelationTypeIndex.class.isAssignableFrom(index.getClass()) || JanusGraphIndex.class.isAssignableFrom(index.getClass()), "Index %s has class %s: must be a %s or %s (or subtype)", index, index.getClass(), RelationTypeIndex.class.getSimpleName(), JanusGraphIndex.class.getSimpleName());
    org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
    ModifiableHadoopConfiguration janusGraphMapReduceConfiguration = ModifiableHadoopConfiguration.of(JanusGraphHadoopConfiguration.MAPRED_NS, hadoopConf);
    // The IndexUpdateJob we'll execute (REINDEX or REMOVE_INDEX) and the Mapper
    // that will run it (VertexScanJob vs ScanJob)
    final Class<? extends IndexUpdateJob> indexJobClass;
    final Class<? extends Mapper> mapperClass;
    if (updateAction.equals(SchemaAction.REINDEX)) {
        indexJobClass = IndexRepairJob.class;
        mapperClass = HadoopVertexScanMapper.class;
    } else if (updateAction.equals(SchemaAction.REMOVE_INDEX)) {
        indexJobClass = IndexRemoveJob.class;
        mapperClass = HadoopScanMapper.class;
    } else {
        // Shouldn't get here -- if this exception is ever thrown, update SUPPORTED_ACTIONS
        throw new IllegalStateException("Unrecognized " + SchemaAction.class.getSimpleName() + ": " + updateAction);
    }
    // The column family that serves as input to the IndexUpdateJob
    final String readCF;
    if (RelationTypeIndex.class.isAssignableFrom(index.getClass())) {
        readCF = Backend.EDGESTORE_NAME;
    } else {
        JanusGraphIndex graphIndex = (JanusGraphIndex) index;
        if (graphIndex.isMixedIndex() && !updateAction.equals(SchemaAction.REINDEX)) {
            throw new UnsupportedOperationException("External mixed indexes must be removed in the indexing system directly.");
        }
        Preconditions.checkState(JanusGraphIndex.class.isAssignableFrom(index.getClass()));
        if (updateAction.equals(SchemaAction.REMOVE_INDEX)) {
            readCF = Backend.INDEXSTORE_NAME;
        } else {
            readCF = Backend.EDGESTORE_NAME;
        }
    }
    janusGraphMapReduceConfiguration.set(JanusGraphHadoopConfiguration.COLUMN_FAMILY_NAME, readCF);
    // The MapReduce InputFormat class based on the open graph's store manager
    final Class<? extends InputFormat> inputFormat;
    final Class<? extends KeyColumnValueStoreManager> storeManagerClass = graph.getBackend().getStoreManagerClass();
    if (CASSANDRA_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = CassandraBinaryInputFormat.class;
        // Set the partitioner
        IPartitioner part = ((AbstractCassandraStoreManager) graph.getBackend().getStoreManager()).getCassandraPartitioner();
        hadoopConf.set("cassandra.input.partitioner.class", part.getClass().getName());
    } else if (HBASE_STORE_MANAGER_CLASSES.contains(storeManagerClass)) {
        inputFormat = HBaseBinaryInputFormat.class;
    } else {
        throw new IllegalArgumentException("Store manager class " + storeManagerClass + " is not supported");
    }
    // The index name and relation type name (if the latter is applicable)
    final String indexName = index.name();
    final RelationType relationType = RelationTypeIndex.class.isAssignableFrom(index.getClass()) ? ((RelationTypeIndex) index).getType() : null;
    final String relationTypeName = relationType == null ? StringUtils.EMPTY : relationType.name();
    Preconditions.checkNotNull(indexName);
    // Set the class of the IndexUpdateJob
    janusGraphMapReduceConfiguration.set(JanusGraphHadoopConfiguration.SCAN_JOB_CLASS, indexJobClass.getName());
    // Set the configuration of the IndexUpdateJob
    copyIndexJobKeys(hadoopConf, indexName, relationTypeName);
    janusGraphMapReduceConfiguration.set(JanusGraphHadoopConfiguration.SCAN_JOB_CONFIG_ROOT, GraphDatabaseConfiguration.class.getName() + "#JOB_NS");
    // Copy the StandardJanusGraph configuration under JanusGraphHadoopConfiguration.GRAPH_CONFIG_KEYS
    org.apache.commons.configuration.Configuration localConfiguration = graph.getConfiguration().getLocalConfiguration();
    localConfiguration.clearProperty(Graph.GRAPH);
    copyInputKeys(hadoopConf, localConfiguration);
    String jobName = HadoopScanMapper.class.getSimpleName() + "[" + indexJobClass.getSimpleName() + "]";
    try {
        return new CompletedJobFuture(HadoopScanRunner.runJob(hadoopConf, inputFormat, jobName, mapperClass));
    } catch (Exception e) {
        return new FailedJobFuture(e);
    }
}
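A hedged usage sketch for this method, modeled on the reindexing example in the JanusGraph documentation. The properties file path and the index name "byName" are placeholders, and exact class names can vary between releases.

// Sketch only: run a REINDEX job through MapReduceIndexManagement and wait for it.
// (Checked exceptions from updateIndex() and get() are omitted for brevity.)
JanusGraph graph = JanusGraphFactory.open("conf/janusgraph-cassandra.properties"); // placeholder config
MapReduceIndexManagement mr = new MapReduceIndexManagement(graph);
JanusGraphManagement mgmt = graph.openManagement();
mr.updateIndex(mgmt.getGraphIndex("byName"), SchemaAction.REINDEX).get(); // blocks, per the javadoc above
mgmt.commit();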
use of org.janusgraph.core.RelationType in project grakn by graknlabs.
In the class TxFactoryJanus, the method makeIndicesVertexCentric:
private static void makeIndicesVertexCentric(JanusGraphManagement management) {
    ResourceBundle keys = ResourceBundle.getBundle("indices-edges");
    Set<String> edgeLabels = keys.keySet();
    for (String edgeLabel : edgeLabels) {
        String[] propertyKeyStrings = keys.getString(edgeLabel).split(",");
        // Get all the property keys we need
        Set<PropertyKey> propertyKeys = stream(propertyKeyStrings).map(keyId -> {
            PropertyKey key = management.getPropertyKey(keyId);
            if (key == null) {
                throw new RuntimeException("Trying to create edge index on label [" + edgeLabel + "] but the property [" + keyId + "] does not exist");
            }
            return key;
        }).collect(Collectors.toSet());
        // Get the edge and indexing information
        RelationType relationType = management.getRelationType(edgeLabel);
        EdgeLabel label = management.getEdgeLabel(edgeLabel);
        // Create index on each property key
        for (PropertyKey key : propertyKeys) {
            if (management.getRelationIndex(relationType, edgeLabel + "by" + key.name()) == null) {
                management.buildEdgeIndex(label, edgeLabel + "by" + key.name(), Direction.BOTH, Order.decr, key);
            }
        }
        // Create index on all property keys
        String propertyKeyId = propertyKeys.stream().map(Namifiable::name).collect(Collectors.joining("_"));
        if (management.getRelationIndex(relationType, edgeLabel + "by" + propertyKeyId) == null) {
            PropertyKey[] allKeys = propertyKeys.toArray(new PropertyKey[propertyKeys.size()]);
            management.buildEdgeIndex(label, edgeLabel + "by" + propertyKeyId, Direction.BOTH, Order.decr, allKeys);
        }
    }
}
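For comparison, a minimal sketch of the vertex-centric index pattern this method automates, following the standard JanusGraph documentation example. The "battled" label, "time" key, and "battledByTime" index name are illustrative; Order.decr matches the TinkerPop version used in this listing.

// Sketch only: build one vertex-centric (relation) index by hand.
JanusGraphManagement mgmt = graph.openManagement();
EdgeLabel battled = mgmt.getEdgeLabel("battled");
PropertyKey time = mgmt.getPropertyKey("time");
if (mgmt.getRelationIndex(battled, "battledByTime") == null) {
    mgmt.buildEdgeIndex(battled, "battledByTime", Direction.BOTH, Order.decr, time);
}
mgmt.commit();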
use of org.janusgraph.core.RelationType in project janusgraph by JanusGraph.
In the class EdgeSerializer, the method parseRelation:
@Override
public RelationCache parseRelation(Entry data, boolean excludeProperties, TypeInspector tx) {
    ReadBuffer in = data.asReadBuffer();
    RelationTypeParse typeAndDir = IDHandler.readRelationType(in);
    long typeId = typeAndDir.typeId;
    Direction dir = typeAndDir.dirID.getDirection();
    RelationType relationType = tx.getExistingRelationType(typeId);
    InternalRelationType def = (InternalRelationType) relationType;
    Multiplicity multiplicity = def.multiplicity();
    long[] keySignature = def.getSortKey();
    long relationId;
    Object other;
    int startKeyPos = in.getPosition();
    int endKeyPos = 0;
    if (relationType.isEdgeLabel()) {
        long otherVertexId;
        if (multiplicity.isConstrained()) {
            if (multiplicity.isUnique(dir)) {
                otherVertexId = VariableLong.readPositive(in);
            } else {
                in.movePositionTo(data.getValuePosition());
                otherVertexId = VariableLong.readPositiveBackward(in);
                in.movePositionTo(data.getValuePosition());
            }
            relationId = VariableLong.readPositive(in);
        } else {
            in.movePositionTo(data.getValuePosition());
            relationId = VariableLong.readPositiveBackward(in);
            otherVertexId = VariableLong.readPositiveBackward(in);
            endKeyPos = in.getPosition();
            in.movePositionTo(data.getValuePosition());
        }
        other = otherVertexId;
    } else {
        assert relationType.isPropertyKey();
        PropertyKey key = (PropertyKey) relationType;
        if (multiplicity.isConstrained()) {
            other = readPropertyValue(in, key);
            relationId = VariableLong.readPositive(in);
        } else {
            in.movePositionTo(data.getValuePosition());
            relationId = VariableLong.readPositiveBackward(in);
            endKeyPos = in.getPosition();
            in.movePositionTo(data.getValuePosition());
            other = readPropertyValue(in, key);
        }
        Preconditions.checkNotNull(other, "Encountered error in deserializer [null value returned]. Check serializer compatibility.");
    }
    if (!excludeProperties) {
        LongObjectHashMap<Object> properties = new LongObjectHashMap<>(4);
        if (!multiplicity.isConstrained() && keySignature.length > 0) {
            int currentPos = in.getPosition();
            // Read the sort key, which only exists if the type is not unique in this direction;
            // after reading the ids, we are on the last byte of the key
            assert endKeyPos > startKeyPos;
            int keyLength = endKeyPos - startKeyPos;
            in.movePositionTo(startKeyPos);
            ReadBuffer inKey = in;
            if (def.getSortOrder() == Order.DESC) {
                inKey = in.subrange(keyLength, true);
            }
            readInlineTypes(keySignature, properties, inKey, tx, InlineType.KEY);
            in.movePositionTo(currentPos);
        }
        // Read the value signature
        readInlineTypes(def.getSignature(), properties, in, tx, InlineType.SIGNATURE);
        // Read the remaining inline properties
        while (in.hasRemaining()) {
            PropertyKey type = tx.getExistingPropertyKey(IDHandler.readInlineRelationType(in));
            Object propertyValue = readInline(in, type, InlineType.NORMAL);
            assert propertyValue != null;
            properties.put(type.longId(), propertyValue);
        }
        if (data.hasMetaData()) {
            for (Map.Entry<EntryMetaData, Object> metas : data.getMetaData().entrySet()) {
                ImplicitKey key = ImplicitKey.MetaData2ImplicitKey.get(metas.getKey());
                if (key != null) {
                    assert metas.getValue() != null;
                    properties.put(key.longId(), metas.getValue());
                }
            }
        }
        return new RelationCache(dir, typeId, relationId, other, properties);
    } else {
        return new RelationCache(dir, typeId, relationId, other);
    }
}
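A short illustrative sketch of how the returned RelationCache is interpreted downstream (the same fields and accessors are used by the Hadoop deserializer further below); the edgeSerializer, entry, and tx variables are assumed to be in scope.

// Sketch only: "other" is either the property value or the adjacent vertex id,
// depending on whether the relation type is a property key or an edge label.
RelationCache relation = edgeSerializer.parseRelation(entry, false, tx);
RelationType relationType = tx.getExistingRelationType(relation.typeId);
if (relationType.isPropertyKey()) {
    Object value = relation.getValue();               // property value
} else {
    long otherVertexId = relation.getOtherVertexId(); // adjacent vertex id
}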
use of org.janusgraph.core.RelationType in project janusgraph by JanusGraph.
In the class JanusGraphVertexDeserializer, the method readHadoopVertex:
// Read a single row from the edgestore and create a TinkerVertex corresponding to the row
// The neighboring vertices are represented by DetachedVertex instances
public TinkerVertex readHadoopVertex(final StaticBuffer key, Iterable<Entry> entries) {
    // Convert key to a vertex ID
    final long vertexId = idManager.getKeyID(key);
    Preconditions.checkArgument(vertexId > 0);
    // Partitioned vertex handling
    if (idManager.isPartitionedVertex(vertexId)) {
        Preconditions.checkState(setup.getFilterPartitionedVertices(), "Read partitioned vertex (ID=%s), but partitioned vertex filtering is disabled.", vertexId);
        log.debug("Skipping partitioned vertex with ID {}", vertexId);
        return null;
    }
    // Create TinkerVertex
    TinkerGraph tg = TinkerGraph.open();
    TinkerVertex tv = null;
    // Iterate over edgestore columns to find the vertex's label relation
    for (final Entry data : entries) {
        RelationReader relationReader = setup.getRelationReader();
        final RelationCache relation = relationReader.parseRelation(data, false, typeManager);
        if (systemTypes.isVertexLabelSystemType(relation.typeId)) {
            // Found the vertex label
            long vertexLabelId = relation.getOtherVertexId();
            VertexLabel vl = typeManager.getExistingVertexLabel(vertexLabelId);
            // Create TinkerVertex with this label
            tv = getOrCreateVertex(vertexId, vl.name(), tg);
        } else if (systemTypes.isTypeSystemType(relation.typeId)) {
            log.trace("Vertex {} is a system vertex", vertexId);
            return null;
        }
    }
    // Fall back to an unlabeled vertex if no vertex label relation was found
    if (null == tv) {
        tv = getOrCreateVertex(vertexId, null, tg);
    }
    Preconditions.checkNotNull(tv, "Unable to determine vertex label for vertex with ID %s", vertexId);
    // Iterate over and decode edgestore columns (relations) on this vertex
    for (final Entry data : entries) {
        try {
            RelationReader relationReader = setup.getRelationReader();
            final RelationCache relation = relationReader.parseRelation(data, false, typeManager);
            // Ignore system types
            if (systemTypes.isSystemType(relation.typeId))
                continue;
            final RelationType type = typeManager.getExistingRelationType(relation.typeId);
            // Ignore hidden types
            if (((InternalRelationType) type).isInvisibleType())
                continue;
            // Decode and create the relation (edge or property)
            if (type.isPropertyKey()) {
                // Decode property
                Object value = relation.getValue();
                Preconditions.checkNotNull(value);
                VertexProperty.Cardinality card = getPropertyKeyCardinality(type.name());
                VertexProperty<Object> vp = tv.property(card, type.name(), value, T.id, relation.relationId);
                // Decode meta properties
                decodeProperties(relation, vp);
            } else {
                assert type.isEdgeLabel();
                // Partitioned vertex handling
                if (idManager.isPartitionedVertex(relation.getOtherVertexId())) {
                    Preconditions.checkState(setup.getFilterPartitionedVertices(), "Read edge incident on a partitioned vertex, but partitioned vertex filtering is disabled. " + "Relation ID: %s. This vertex ID: %s. Other vertex ID: %s. Edge label: %s.", relation.relationId, vertexId, relation.getOtherVertexId(), type.name());
                    log.debug("Skipping edge with ID {} incident on partitioned vertex with ID {} (and nonpartitioned vertex with ID {})", relation.relationId, relation.getOtherVertexId(), vertexId);
                    continue;
                }
                // Decode edge
                TinkerEdge te;
                // We don't know the label of the other vertex, but one must be provided
                TinkerVertex adjacentVertex = getOrCreateVertex(relation.getOtherVertexId(), null, tg);
                // Skip self-loop edges that were already processed, but from a different direction
                if (tv.equals(adjacentVertex) && edgeExists(tv, type, relation)) {
                    continue;
                }
                if (relation.direction.equals(Direction.IN)) {
                    te = (TinkerEdge) adjacentVertex.addEdge(type.name(), tv, T.id, relation.relationId);
                } else if (relation.direction.equals(Direction.OUT)) {
                    te = (TinkerEdge) tv.addEdge(type.name(), adjacentVertex, T.id, relation.relationId);
                } else {
                    throw new RuntimeException("Direction.BOTH is not supported");
                }
                decodeProperties(relation, te);
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    return tv;
}
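The getOrCreateVertex helper referenced above is not part of this listing. A hedged sketch of what it plausibly does, inferred only from its call sites (look up by id in the TinkerGraph, create the vertex with an optional label if absent); the real implementation may differ.

// Hypothetical sketch of getOrCreateVertex (not shown above).
// Requires java.util.NoSuchElementException.
private static TinkerVertex getOrCreateVertex(final long vertexId, final String label, final TinkerGraph tg) {
    try {
        // Return the vertex if it was already created for an earlier relation
        return (TinkerVertex) tg.vertices(vertexId).next();
    } catch (NoSuchElementException e) {
        // Otherwise create it, attaching the label when one is known
        return (TinkerVertex) (null != label
                ? tg.addVertex(T.id, vertexId, T.label, label)
                : tg.addVertex(T.id, vertexId));
    }
}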