use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
the class HadoopScanMapper method finishSetup.
protected void finishSetup(ModifiableHadoopConfiguration scanConf, Configuration graphConf) {
    jobConf = getJobConfiguration(scanConf);
    Preconditions.checkNotNull(metrics);
    // Allowed to be null for jobs that specify no configuration and no configuration root
    // Preconditions.checkNotNull(jobConf);
    Preconditions.checkNotNull(job);
    job.workerIterationStart(jobConf, graphConf, metrics);
    keyFilter = job.getKeyFilter();
    List<SliceQuery> sliceQueries = job.getQueries();
    Preconditions.checkArgument(null != sliceQueries, "Job cannot specify null query list");
    Preconditions.checkArgument(0 < sliceQueries.size(), "Job must specify at least one query");
    // Assign head of getQueries() to "initialQuery"
    initialQuery = sliceQueries.get(0);
    // Assign tail of getQueries() to "subsequentQueries"
    subsequentQueries = new ArrayList<>(sliceQueries.subList(1, sliceQueries.size()));
    Preconditions.checkState(sliceQueries.size() == subsequentQueries.size() + 1);
    Preconditions.checkNotNull(initialQuery);
    if (0 < subsequentQueries.size()) {
        // It is assumed that the first query is the grounding query if multiple queries exist
        StaticBuffer start = initialQuery.getSliceStart();
        Preconditions.checkArgument(start.equals(BufferUtil.zeroBuffer(1)), "Expected start of first query to be all 0s: %s", start);
        StaticBuffer end = initialQuery.getSliceEnd();
        Preconditions.checkArgument(end.equals(BufferUtil.oneBuffer(end.length())), "Expected end of first query to be all 1s: %s", end);
    }
}
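
The precondition above only accepts a first ("grounding") query whose slice spans the entire column range. A minimal sketch of building such a query with the same BufferUtil helpers; the 8-byte length of the upper bound is an arbitrary, purely illustrative choice.

import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.keycolumnvalue.SliceQuery;
import org.janusgraph.diskstorage.util.BufferUtil;

public class GroundingQueryExample {

    // Builds a slice query covering the full column range, as finishSetup expects
    // of the first query whenever subsequent queries are present.
    public static SliceQuery fullRangeQuery() {
        StaticBuffer start = BufferUtil.zeroBuffer(1); // all 0s: lowest possible column
        StaticBuffer end = BufferUtil.oneBuffer(8);    // all 1s: upper-bound sentinel (8 bytes chosen arbitrarily)
        return new SliceQuery(start, end);
    }
}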
use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
the class HadoopScanMapper method findEntriesMatchingQuery.
private EntryList findEntriesMatchingQuery(SliceQuery query, EntryList sortedEntries) {
    // Inclusive
    int lowestStartMatch = sortedEntries.size();
    // Inclusive
    int highestEndMatch = -1;
    final StaticBuffer queryStart = query.getSliceStart();
    final StaticBuffer queryEnd = query.getSliceEnd();
    // Find the lowest matchStart s.t. query.getSliceStart <= sortedEntries.get(matchStart)
    int low = 0;
    int high = sortedEntries.size() - 1;
    while (low <= high) {
        int mid = (low + high) >>> 1;
        Entry midVal = sortedEntries.get(mid);
        int cmpStart = queryStart.compareTo(midVal.getColumn());
        if (0 < cmpStart) {
            // query lower bound exceeds entry (no match)
            if (lowestStartMatch == mid + 1) {
                // lowestStartMatch located
                break;
            }
            // Move to higher list index
            low = mid + 1;
        } else /* (0 >= cmpStart) */ {
            // entry equals or exceeds query lower bound (match, but not necessarily lowest match)
            if (mid < lowestStartMatch) {
                lowestStartMatch = mid;
            }
            // Move to a lower list index
            high = mid - 1;
        }
    }
    // If no entry satisfies the query's lower bound, nothing can match at all,
    // so we can bypass the highestEndMatch search and just return an empty result.
    if (sortedEntries.size() == lowestStartMatch) {
        return EntryList.EMPTY_LIST;
    }
    // Find the highest matchEnd s.t. sortedEntries.get(matchEnd) < query.getSliceEnd
    low = 0;
    high = sortedEntries.size() - 1;
    while (low <= high) {
        int mid = (low + high) >>> 1;
        Entry midVal = sortedEntries.get(mid);
        int cmpEnd = queryEnd.compareTo(midVal.getColumn());
        if (0 < cmpEnd) {
            // query upper bound exceeds entry (match, not necessarily highest)
            if (mid > highestEndMatch) {
                highestEndMatch = mid;
            }
            // Move to higher list index
            low = mid + 1;
        } else /* (0 >= cmpEnd) */ {
            // entry equals or exceeds query upper bound (no match)
            if (highestEndMatch == mid - 1) {
                // highestEndMatch located
                break;
            }
            // Move to a lower list index
            high = mid - 1;
        }
    }
    if (0 <= highestEndMatch - lowestStartMatch) {
        // Return sublist between indices (inclusive at both indices)
        // This will be passed into subList, which interprets it exclusively
        int endIndex = highestEndMatch + 1;
        if (query.hasLimit()) {
            endIndex = Math.min(endIndex, query.getLimit() + lowestStartMatch);
        }
        // TODO avoid unnecessary copy here
        return EntryArrayList.of(sortedEntries.subList(lowestStartMatch, endIndex));
    } else {
        return EntryList.EMPTY_LIST;
    }
}
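
findEntriesMatchingQuery is a two-sided binary search over entries sorted by column: it finds the first entry at or above the query's start and the last entry strictly below the query's end. A simplified sketch of the same bounds technique on an ordinary sorted list, using a half-open lower-bound search; this is an illustration of the idea, not JanusGraph API.

import java.util.Collections;
import java.util.List;

public final class RangeSearchSketch {

    // Returns the sublist of elements x with start <= x < end, assuming the input list is sorted.
    public static <T extends Comparable<T>> List<T> slice(List<T> sorted, T start, T end) {
        int from = lowerBound(sorted, start); // first index with element >= start
        int to = lowerBound(sorted, end);     // first index with element >= end (exclusive)
        if (from >= to) {
            return Collections.emptyList();
        }
        return sorted.subList(from, to);
    }

    // Classic lower-bound binary search over the index range [0, size).
    private static <T extends Comparable<T>> int lowerBound(List<T> sorted, T key) {
        int low = 0;
        int high = sorted.size();
        while (low < high) {
            int mid = (low + high) >>> 1;
            if (sorted.get(mid).compareTo(key) < 0) {
                low = mid + 1;
            } else {
                high = mid;
            }
        }
        return low;
    }
}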
use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
the class TransactionLogHeader method serializeUserLog.
public StaticBuffer serializeUserLog(Serializer serializer, Entry sourceTxEntry, StandardTransactionId sourceTxId) {
    Preconditions.checkArgument(sourceTxEntry != null && sourceTxEntry.status == LogTxStatus.PRECOMMIT && sourceTxEntry.header.transactionId == sourceTxId.getTransactionId());
    StaticBuffer sourceContent = sourceTxEntry.content;
    Preconditions.checkArgument(sourceContent != null && sourceContent.length() > 0);
    final EnumMap<LogTxMeta, Object> meta = new EnumMap<>(LogTxMeta.class);
    meta.put(LogTxMeta.SOURCE_TRANSACTION, sourceTxId);
    DataOutput out = serializeHeader(serializer, 50 + sourceContent.length(), LogTxStatus.USER_LOG, meta);
    out.putBytes(sourceContent);
    return out.getStaticBuffer();
}
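
The method wraps the source transaction's existing payload behind a fresh USER_LOG header. A minimal sketch of producing a non-empty StaticBuffer payload such as sourceContent; StaticArrayBuffer.of wraps the byte array without copying, and the class and method names here are illustrative only.

import java.nio.charset.StandardCharsets;
import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.util.StaticArrayBuffer;

public class PayloadExample {

    // Wraps an arbitrary UTF-8 string as a StaticBuffer, satisfying the
    // non-null / non-empty precondition checked above.
    public static StaticBuffer payload(String userLogMessage) {
        byte[] raw = userLogMessage.getBytes(StandardCharsets.UTF_8);
        return StaticArrayBuffer.of(raw);
    }
}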
use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
the class PartitionIDRange method getIDRanges.
public static List<PartitionIDRange> getIDRanges(final int partitionBits, final List<KeyRange> locals) {
    Preconditions.checkArgument(partitionBits > 0 && partitionBits < (Integer.SIZE - 1));
    Preconditions.checkArgument(locals != null && !locals.isEmpty(), "KeyRanges are empty");
    final int partitionIdBound = (1 << (partitionBits));
    final int backShift = Integer.SIZE - partitionBits;
    List<PartitionIDRange> partitionRanges = Lists.newArrayList();
    for (KeyRange local : locals) {
        Preconditions.checkArgument(local.getStart().length() >= 4);
        Preconditions.checkArgument(local.getEnd().length() >= 4);
        if (local.getStart().equals(local.getEnd())) {
            // Start=End => Partition spans entire range
            partitionRanges.add(new PartitionIDRange(0, partitionIdBound, partitionIdBound));
            continue;
        }
        int startInt = local.getStart().getInt(0);
        int lowerID = startInt >>> backShift;
        assert lowerID >= 0 && lowerID < partitionIdBound;
        // Lower id must be inclusive, so check that we did not truncate anything!
        boolean truncatedBits = (lowerID << backShift) != startInt;
        StaticBuffer start = local.getAt(0);
        for (int i = 4; i < start.length() && !truncatedBits; i++) {
            if (start.getByte(i) != 0)
                truncatedBits = true;
        }
        // adjust to make sure we are inclusive
        if (truncatedBits)
            lowerID += 1;
        // upper id is exclusive
        int upperID = local.getEnd().getInt(0) >>> backShift;
        // Check that we haven't jumped order indicating that the interval was too small
        if ((local.getStart().compareTo(local.getEnd()) < 0 && lowerID >= upperID)) {
            discardRange(local);
            continue;
        }
        // ensure that lowerID remains within range
        lowerID = lowerID % partitionIdBound;
        if (lowerID == upperID) {
            // After re-normalizing, check for interval collision
            discardRange(local);
            continue;
        }
        partitionRanges.add(new PartitionIDRange(lowerID, upperID, partitionIdBound));
    }
    return partitionRanges;
}
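
A worked example of the shift arithmetic above, assuming partitionBits = 4: the partition id is the top 4 bits of the key's first four bytes, and truncatedBits flags any lost low-order bits. The key value is made up for illustration.

public class PartitionBitsExample {

    public static void main(String[] args) {
        int partitionBits = 4;
        int partitionIdBound = 1 << partitionBits;    // 16 partitions
        int backShift = Integer.SIZE - partitionBits; // 32 - 4 = 28
        int startInt = 0xA0000000;                    // first 4 bytes of a hypothetical key
        int lowerID = startInt >>> backShift;         // top 4 bits: 0xA = 10
        boolean truncatedBits = (lowerID << backShift) != startInt; // false: no low-order bits were lost
        System.out.println("partition " + lowerID + " of " + partitionIdBound + ", truncated=" + truncatedBits);
    }
}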
use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
the class IndexRepairJob method process.
@Override
public void process(JanusGraphVertex vertex, ScanMetrics metrics) {
    try {
        BackendTransaction mutator = writeTx.getTxHandle();
        if (index instanceof RelationTypeIndex) {
            RelationTypeIndexWrapper wrapper = (RelationTypeIndexWrapper) index;
            InternalRelationType wrappedType = wrapper.getWrappedType();
            EdgeSerializer edgeSerializer = writeTx.getEdgeSerializer();
            List<Entry> additions = new ArrayList<>();
            for (Object relation : vertex.query().types(indexRelationTypeName).direction(Direction.OUT).relations()) {
                InternalRelation janusgraphRelation = (InternalRelation) relation;
                for (int pos = 0; pos < janusgraphRelation.getArity(); pos++) {
                    if (!wrappedType.isUnidirected(Direction.BOTH) && !wrappedType.isUnidirected(EdgeDirection.fromPosition(pos)))
                        continue; // Directionality is not covered
                    Entry entry = edgeSerializer.writeRelation(janusgraphRelation, wrappedType, pos, writeTx);
                    additions.add(entry);
                }
            }
            StaticBuffer vertexKey = writeTx.getIdInspector().getKey(vertex.longId());
            mutator.mutateEdges(vertexKey, additions, KCVSCache.NO_DELETIONS);
            metrics.incrementCustom(ADDED_RECORDS_COUNT, additions.size());
        } else if (index instanceof JanusGraphIndex) {
            IndexType indexType = managementSystem.getSchemaVertex(index).asIndexType();
            assert indexType != null;
            IndexSerializer indexSerializer = graph.getIndexSerializer();
            // Gather elements to index
            List<JanusGraphElement> elements;
            switch (indexType.getElement()) {
                case VERTEX:
                    elements = ImmutableList.of(vertex);
                    break;
                case PROPERTY:
                    elements = Lists.newArrayList();
                    for (JanusGraphVertexProperty p : addIndexSchemaConstraint(vertex.query(), indexType).properties()) {
                        elements.add(p);
                    }
                    break;
                case EDGE:
                    elements = Lists.newArrayList();
                    for (Object e : addIndexSchemaConstraint(vertex.query().direction(Direction.OUT), indexType).edges()) {
                        elements.add((JanusGraphEdge) e);
                    }
                    break;
                default:
                    throw new AssertionError("Unexpected category: " + indexType.getElement());
            }
            if (indexType.isCompositeIndex()) {
                for (JanusGraphElement element : elements) {
                    Set<IndexSerializer.IndexUpdate<StaticBuffer, Entry>> updates = indexSerializer.reindexElement(element, (CompositeIndexType) indexType);
                    for (IndexSerializer.IndexUpdate<StaticBuffer, Entry> update : updates) {
                        log.debug("Mutating index {}: {}", indexType, update.getEntry());
                        mutator.mutateIndex(update.getKey(), Lists.newArrayList(update.getEntry()), KCVSCache.NO_DELETIONS);
                        metrics.incrementCustom(ADDED_RECORDS_COUNT);
                    }
                }
            } else {
                assert indexType.isMixedIndex();
                Map<String, Map<String, List<IndexEntry>>> documentsPerStore = new HashMap<>();
                for (JanusGraphElement element : elements) {
                    indexSerializer.reindexElement(element, (MixedIndexType) indexType, documentsPerStore);
                    metrics.incrementCustom(DOCUMENT_UPDATES_COUNT);
                }
                mutator.getIndexTransaction(indexType.getBackingIndexName()).restore(documentsPerStore);
            }
        } else {
            throw new UnsupportedOperationException("Unsupported index found: " + index);
        }
    } catch (final Exception e) {
        managementSystem.rollback();
        writeTx.rollback();
        metrics.incrementCustom(FAILED_TX);
        throw new JanusGraphException(e.getMessage(), e);
    }
}
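
For context, a repair/reindex job like the one above is normally triggered through the public management API rather than constructed directly. A minimal sketch under that assumption; the graph handle and index name are placeholders.

import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.schema.JanusGraphManagement;
import org.janusgraph.core.schema.SchemaAction;

public class ReindexExample {

    // Submits a REINDEX action for a graph index and waits for the scan job to finish.
    public static void reindex(JanusGraph graph, String indexName) throws Exception {
        JanusGraphManagement mgmt = graph.openManagement();
        mgmt.updateIndex(mgmt.getGraphIndex(indexName), SchemaAction.REINDEX).get();
        mgmt.commit();
    }
}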