Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class EdgeSerializer, method getQuery.
public SliceQuery getQuery(InternalRelationType type, Direction dir, TypedInterval[] sortKey) {
    Preconditions.checkNotNull(type);
    Preconditions.checkNotNull(dir);
    Preconditions.checkArgument(type.isUnidirected(Direction.BOTH) || type.isUnidirected(dir));
    StaticBuffer sliceStart = null, sliceEnd = null;
    RelationCategory rt = type.isPropertyKey() ? RelationCategory.PROPERTY : RelationCategory.EDGE;
    if (dir == Direction.BOTH) {
        assert type.isEdgeLabel();
        sliceStart = IDHandler.getRelationType(type.longId(), getDirID(Direction.OUT, rt), type.isInvisibleType());
        sliceEnd = IDHandler.getRelationType(type.longId(), getDirID(Direction.IN, rt), type.isInvisibleType());
        assert sliceStart.compareTo(sliceEnd) < 0;
        sliceEnd = BufferUtil.nextBiggerBuffer(sliceEnd);
    } else {
        DirectionID dirID = getDirID(dir, rt);
        DataOutput colStart = serializer.getDataOutput(DEFAULT_COLUMN_CAPACITY);
        DataOutput colEnd = serializer.getDataOutput(DEFAULT_COLUMN_CAPACITY);
        IDHandler.writeRelationType(colStart, type.longId(), dirID, type.isInvisibleType());
        IDHandler.writeRelationType(colEnd, type.longId(), dirID, type.isInvisibleType());
        long[] sortKeyIDs = type.getSortKey();
        Preconditions.checkArgument(sortKey.length >= sortKeyIDs.length);
        assert colStart.getPosition() == colEnd.getPosition();
        int keyStartPos = colStart.getPosition();
        int keyEndPos = -1;
        for (int i = 0; i < sortKey.length && sortKey[i] != null; i++) {
            PropertyKey propertyKey = sortKey[i].key;
            Interval interval = sortKey[i].interval;
            if (i >= sortKeyIDs.length) {
                assert !type.multiplicity().isUnique(dir);
                assert (propertyKey instanceof ImplicitKey)
                    && (propertyKey == ImplicitKey.JANUSGRAPHID || propertyKey == ImplicitKey.ADJACENT_ID);
                assert propertyKey != ImplicitKey.ADJACENT_ID || (i == sortKeyIDs.length);
                assert propertyKey != ImplicitKey.JANUSGRAPHID
                    || (!type.multiplicity().isConstrained()
                        && (i == sortKeyIDs.length && propertyKey.isPropertyKey()
                            || i == sortKeyIDs.length + 1 && propertyKey.isEdgeLabel()));
                assert colStart.getPosition() == colEnd.getPosition();
                assert interval == null || interval.isPoints();
                keyEndPos = colStart.getPosition();
            } else {
                assert !type.multiplicity().isConstrained();
                assert propertyKey.longId() == sortKeyIDs[i];
            }
            if (interval == null || interval.isEmpty()) {
                break;
            }
            if (interval.isPoints()) {
                if (propertyKey == ImplicitKey.JANUSGRAPHID || propertyKey == ImplicitKey.ADJACENT_ID) {
                    assert !type.multiplicity().isUnique(dir);
                    VariableLong.writePositiveBackward(colStart, (Long) interval.getStart());
                    VariableLong.writePositiveBackward(colEnd, (Long) interval.getEnd());
                } else {
                    writeInline(colStart, propertyKey, interval.getStart(), InlineType.KEY);
                    writeInline(colEnd, propertyKey, interval.getEnd(), InlineType.KEY);
                }
            } else {
                if (interval.getStart() != null)
                    writeInline(colStart, propertyKey, interval.getStart(), InlineType.KEY);
                if (interval.getEnd() != null)
                    writeInline(colEnd, propertyKey, interval.getEnd(), InlineType.KEY);
                switch (type.getSortOrder()) {
                    case ASC:
                        sliceStart = colStart.getStaticBuffer();
                        sliceEnd = colEnd.getStaticBuffer();
                        if (!interval.startInclusive())
                            sliceStart = BufferUtil.nextBiggerBuffer(sliceStart);
                        if (interval.endInclusive())
                            sliceEnd = BufferUtil.nextBiggerBuffer(sliceEnd);
                        break;
                    case DESC:
                        sliceEnd = colStart.getStaticBufferFlipBytes(keyStartPos, colStart.getPosition());
                        sliceStart = colEnd.getStaticBufferFlipBytes(keyStartPos, colEnd.getPosition());
                        if (interval.startInclusive())
                            sliceEnd = BufferUtil.nextBiggerBuffer(sliceEnd);
                        if (!interval.endInclusive())
                            sliceStart = BufferUtil.nextBiggerBuffer(sliceStart);
                        break;
                    default:
                        throw new AssertionError(type.getSortOrder().toString());
                }
                assert sliceStart.compareTo(sliceEnd) <= 0;
                break;
            }
        }
        if (sliceStart == null) {
            assert sliceEnd == null && colStart.getPosition() == colEnd.getPosition();
            if (keyEndPos < 0)
                keyEndPos = colStart.getPosition();
            switch (type.getSortOrder()) {
                case ASC:
                    sliceStart = colStart.getStaticBuffer();
                    break;
                case DESC:
                    sliceStart = colStart.getStaticBufferFlipBytes(keyStartPos, keyEndPos);
                    break;
                default:
                    throw new AssertionError(type.getSortOrder().toString());
            }
            sliceEnd = BufferUtil.nextBiggerBuffer(sliceStart);
        }
    }
    return new SliceQuery(sliceStart, sliceEnd);
}
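The slice is a half-open column range [sliceStart, sliceEnd): whenever an inclusive upper bound is needed, BufferUtil.nextBiggerBuffer bumps it to a buffer that sorts just past the last included column. Below is a minimal sketch of that "next bigger" idea over raw byte arrays, for illustration only; the real BufferUtil operates on StaticBuffer, and its exact overflow handling is an assumption to check against the source.

import java.util.Arrays;

public class NextBiggerSketch {

    // Produce a byte[] that sorts immediately after 'b' in unsigned lexicographic
    // order, turning an inclusive upper bound into an exclusive one.
    static byte[] nextBigger(byte[] b) {
        byte[] out = Arrays.copyOf(b, b.length);
        for (int i = out.length - 1; i >= 0; i--) {
            out[i]++;                      // unsigned increment; (byte) 0xFF wraps to 0
            if (out[i] != 0) return out;   // no carry left, done
        }
        // Every byte wrapped: pad with a trailing 0x00 so the result still sorts after 'b'.
        return Arrays.copyOf(b, b.length + 1);
    }

    public static void main(String[] args) {
        // Prints [1, 3, 0]: the trailing 0xFF wraps and the carry lands on the middle byte.
        System.out.println(Arrays.toString(nextBigger(new byte[] { 1, 2, (byte) 0xFF })));
    }
}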
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class BerkeleyJEKeyValueStore, method getSlice.
@Override
public RecordIterator<KeyValueEntry> getSlice(KVQuery query, StoreTransaction txh) throws BackendException {
    log.trace("beginning db={}, op=getSlice, tx={}", name, txh);
    final Transaction tx = getTransaction(txh);
    final StaticBuffer keyStart = query.getStart();
    final StaticBuffer keyEnd = query.getEnd();
    final KeySelector selector = query.getKeySelector();
    final List<KeyValueEntry> result = new ArrayList<>();
    final DatabaseEntry foundKey = keyStart.as(ENTRY_FACTORY);
    final DatabaseEntry foundData = new DatabaseEntry();
    try (final Cursor cursor = db.openCursor(tx, null)) {
        OperationStatus status = cursor.getSearchKeyRange(foundKey, foundData, getLockMode(txh));
        // Iterate until given condition is satisfied or end of records
        while (status == OperationStatus.SUCCESS) {
            StaticBuffer key = getBuffer(foundKey);
            if (key.compareTo(keyEnd) >= 0)
                break;
            if (selector.include(key)) {
                result.add(new KeyValueEntry(key, getBuffer(foundData)));
            }
            if (selector.reachedLimit())
                break;
            status = cursor.getNext(foundKey, foundData, getLockMode(txh));
        }
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }
    log.trace("db={}, op=getSlice, tx={}, resultcount={}", name, txh, result.size());
    return new RecordIterator<KeyValueEntry>() {

        private final Iterator<KeyValueEntry> entries = result.iterator();

        @Override
        public boolean hasNext() {
            return entries.hasNext();
        }

        @Override
        public KeyValueEntry next() {
            return entries.next();
        }

        @Override
        public void close() {
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
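The returned RecordIterator wraps a fully materialized list: the BerkeleyDB cursor is exhausted and closed inside the try-with-resources block above, so later iteration never touches the database. A hedged sketch of a caller draining the iterator follows; the OrderedKeyValueStore interface and the import paths are assumptions based on the JanusGraph API, and the store, query, and transaction are presumed to be set up elsewhere.

import java.io.IOException;

import org.janusgraph.diskstorage.BackendException;
import org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction;
import org.janusgraph.diskstorage.keycolumnvalue.keyvalue.KVQuery;
import org.janusgraph.diskstorage.keycolumnvalue.keyvalue.KeyValueEntry;
import org.janusgraph.diskstorage.keycolumnvalue.keyvalue.OrderedKeyValueStore;
import org.janusgraph.diskstorage.util.RecordIterator;

class SliceScanSketch {

    // Count the entries in a key range; all arguments are created by the caller.
    static int countSlice(OrderedKeyValueStore store, KVQuery query, StoreTransaction txh)
            throws BackendException, IOException {
        int count = 0;
        // RecordIterator extends Closeable, so try-with-resources releases it even
        // when an implementation's close() happens to be a no-op, as above.
        try (RecordIterator<KeyValueEntry> entries = store.getSlice(query, txh)) {
            while (entries.hasNext()) {
                entries.next();  // each KeyValueEntry pairs two StaticBuffers: key and value
                count++;
            }
        }
        return count;
    }
}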
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class AstyanaxKeyColumnValueStore, method getNamesSlice.
public Map<StaticBuffer, EntryList> getNamesSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
    /*
     * RowQuery<K,C> should be parametrized as
     * RowQuery<ByteBuffer,ByteBuffer>. However, this causes the following
     * compilation error when attempting to call withColumnRange on a
     * RowQuery<ByteBuffer,ByteBuffer> instance:
     *
     * java.lang.Error: Unresolved compilation problem: The method
     * withColumnRange(ByteBuffer, ByteBuffer, boolean, int) is ambiguous
     * for the type RowQuery<ByteBuffer,ByteBuffer>
     *
     * The compiler substitutes ByteBuffer=C for both startColumn and
     * endColumn, compares it to its identical twin with that type
     * hard-coded, and dies.
     */
    // Add one for the last column, which may be removed in CassandraHelper.makeEntryList
    final int queryLimit = query.getLimit() + (query.hasLimit() ? 1 : 0);
    final int pageLimit = Math.min(this.readPageSize, queryLimit);
    ByteBuffer sliceStart = query.getSliceStart().asByteBuffer();
    final ByteBuffer sliceEnd = query.getSliceEnd().asByteBuffer();
    final RowSliceQuery rq = keyspace.prepareQuery(columnFamily)
        .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
        .withRetryPolicy(retryPolicy.duplicate())
        .getKeySlice(CassandraHelper.convert(keys));
    // Don't directly chain due to ambiguity resolution; see the comment at the top of this method
    rq.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
    final OperationResult<Rows<ByteBuffer, ByteBuffer>> r;
    try {
        r = (OperationResult<Rows<ByteBuffer, ByteBuffer>>) rq.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    final Rows<ByteBuffer, ByteBuffer> rows = r.getResult();
    final Map<StaticBuffer, EntryList> result = new HashMap<>(rows.size());
    for (Row<ByteBuffer, ByteBuffer> row : rows) {
        assert !result.containsKey(row.getKey());
        final ByteBuffer key = row.getKey();
        ColumnList<ByteBuffer> pageColumns = row.getColumns();
        final List<Column<ByteBuffer>> queryColumns = new ArrayList<>();
        Iterables.addAll(queryColumns, pageColumns);
        while (pageColumns.size() == pageLimit && queryColumns.size() < queryLimit) {
            final Column<ByteBuffer> lastColumn = queryColumns.get(queryColumns.size() - 1);
            sliceStart = lastColumn.getName();
            // No possibility of two values at the same column name, so start the
            // next slice one bit after the last column found by the previous query.
            // byte[] is little-endian
            Integer position = null;
            for (int i = sliceStart.array().length - 1; i >= 0; i--) {
                if (sliceStart.array()[i] < Byte.MAX_VALUE) {
                    position = i;
                    sliceStart.array()[i]++;
                    break;
                }
            }
            if (null == position) {
                throw new PermanentBackendException("Column was not incrementable");
            }
            final RowQuery pageQuery = keyspace.prepareQuery(columnFamily)
                .setConsistencyLevel(getTx(txh).getReadConsistencyLevel().getAstyanax())
                .withRetryPolicy(retryPolicy.duplicate())
                .getKey(row.getKey());
            // Don't directly chain due to ambiguity resolution; see the comment at the top of this method
            pageQuery.withColumnRange(sliceStart, sliceEnd, false, pageLimit);
            final OperationResult<ColumnList<ByteBuffer>> pageResult;
            try {
                pageResult = (OperationResult<ColumnList<ByteBuffer>>) pageQuery.execute();
            } catch (ConnectionException e) {
                throw new TemporaryBackendException(e);
            }
            if (Thread.interrupted()) {
                throw new TraversalInterruptedException();
            }
            // Reset the incremented position to avoid leaking mutations up the
            // stack to callers - sliceStart.array() in fact refers to a column name
            // that will later be read to deserialize an edge (since we assigned it
            // by de-referencing a column from the previous query).
            sliceStart.array()[position]--;
            pageColumns = pageResult.getResult();
            Iterables.addAll(queryColumns, pageColumns);
        }
        result.put(StaticArrayBuffer.of(key), CassandraHelper.makeEntryList(queryColumns, entryGetter, query.getSliceEnd(), query.getLimit()));
    }
    return result;
}
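Paging resumes just past the previous page's last column by incrementing one byte of that column's name in place, then decrementing it after the query so the shared backing array is restored before anything deserializes it. Here is a standalone sketch of that increment-and-restore pattern, operating on a plain ByteBuffer the way the loop above does (the byte values are arbitrary):

import java.nio.ByteBuffer;
import java.util.Arrays;

class ResumePointSketch {

    public static void main(String[] args) {
        ByteBuffer name = ByteBuffer.wrap(new byte[] { 10, 20, 30 });
        // Find the last byte with headroom and bump it so the next slice starts
        // strictly after this column name.
        int position = -1;
        for (int i = name.array().length - 1; i >= 0; i--) {
            if (name.array()[i] < Byte.MAX_VALUE) {
                position = i;
                name.array()[i]++;
                break;
            }
        }
        if (position < 0)
            throw new IllegalStateException("Column was not incrementable");
        System.out.println(Arrays.toString(name.array()));  // [10, 20, 31]
        // Undo the in-place mutation so any caller holding the same array sees
        // the original column name again.
        name.array()[position]--;
        System.out.println(Arrays.toString(name.array()));  // [10, 20, 30]
    }
}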
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class AstyanaxStoreManager, method mutateMany.
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> batch, StoreTransaction txh) throws BackendException {
    MutationBatch m = keyspaceContext.getClient().prepareMutationBatch()
        .withAtomicBatch(atomicBatch)
        .setConsistencyLevel(getTx(txh).getWriteConsistencyLevel().getAstyanax())
        .withRetryPolicy(retryPolicy.duplicate());
    final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
    for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> batchentry : batch.entrySet()) {
        String storeName = batchentry.getKey();
        Preconditions.checkArgument(openStores.containsKey(storeName), "Store cannot be found: " + storeName);
        ColumnFamily<ByteBuffer, ByteBuffer> columnFamily = openStores.get(storeName).getColumnFamily();
        Map<StaticBuffer, KCVMutation> mutations = batchentry.getValue();
        for (Map.Entry<StaticBuffer, KCVMutation> ent : mutations.entrySet()) {
            // The CLMs for additions and deletions are separated because
            // Astyanax's operation timestamp cannot be set on a per-delete
            // or per-addition basis.
            KCVMutation janusgraphMutation = ent.getValue();
            ByteBuffer key = ent.getKey().asByteBuffer();
            if (janusgraphMutation.hasDeletions()) {
                ColumnListMutation<ByteBuffer> deletions = m.withRow(columnFamily, key);
                deletions.setTimestamp(commitTime.getDeletionTime(times));
                for (StaticBuffer b : janusgraphMutation.getDeletions())
                    deletions.deleteColumn(b.as(StaticBuffer.BB_FACTORY));
            }
            if (janusgraphMutation.hasAdditions()) {
                ColumnListMutation<ByteBuffer> updates = m.withRow(columnFamily, key);
                updates.setTimestamp(commitTime.getAdditionTime(times));
                for (Entry e : janusgraphMutation.getAdditions()) {
                    Integer ttl = (Integer) e.getMetaData().get(EntryMetaData.TTL);
                    if (null != ttl && ttl > 0) {
                        updates.putColumn(e.getColumnAs(StaticBuffer.BB_FACTORY), e.getValueAs(StaticBuffer.BB_FACTORY), ttl);
                    } else {
                        updates.putColumn(e.getColumnAs(StaticBuffer.BB_FACTORY), e.getValueAs(StaticBuffer.BB_FACTORY));
                    }
                }
            }
        }
    }
    try {
        m.execute();
    } catch (ConnectionException e) {
        throw new TemporaryBackendException(e);
    }
    sleepAfterWrite(txh, commitTime);
}
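The batch argument nests store name → row key → mutation. Below is a hedged sketch of assembling one such batch before calling mutateMany; the KCVMutation (additions, deletions) constructor and StaticArrayEntry.of are assumptions based on the JanusGraph API, and the store name "edgestore" plus all byte values are arbitrary placeholders.

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.janusgraph.diskstorage.Entry;
import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.keycolumnvalue.KCVMutation;
import org.janusgraph.diskstorage.util.StaticArrayBuffer;
import org.janusgraph.diskstorage.util.StaticArrayEntry;

class BatchSketch {

    static Map<String, Map<StaticBuffer, KCVMutation>> buildBatch() {
        StaticBuffer rowKey = StaticArrayBuffer.of(new byte[] { 1 });
        // One column write and one column delete against the same row.
        Entry addition = StaticArrayEntry.of(
            StaticArrayBuffer.of(new byte[] { 2 }),   // column name
            StaticArrayBuffer.of(new byte[] { 3 }));  // column value
        List<Entry> additions = Collections.singletonList(addition);
        List<StaticBuffer> deletions = Collections.singletonList(
            StaticArrayBuffer.of(new byte[] { 4 }));  // column name to delete
        Map<StaticBuffer, KCVMutation> rows = new HashMap<>();
        rows.put(rowKey, new KCVMutation(additions, deletions));
        Map<String, Map<StaticBuffer, KCVMutation>> batch = new HashMap<>();
        batch.put("edgestore", rows);  // the store must already be open in the manager
        return batch;
    }
}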
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class CassandraHelper, method transformRange.
public static KeyRange transformRange(Token leftKeyExclusive, Token rightKeyInclusive) {
    if (!(leftKeyExclusive instanceof BytesToken))
        throw new UnsupportedOperationException();
    // If the left part is a BytesToken, the right part should be too; otherwise the ring makes no sense
    assert rightKeyInclusive instanceof BytesToken;
    // l is exclusive, r is inclusive
    BytesToken l = (BytesToken) leftKeyExclusive;
    BytesToken r = (BytesToken) rightKeyInclusive;
    byte[] leftTokenValue = l.getTokenValue();
    byte[] rightTokenValue = r.getTokenValue();
    Preconditions.checkArgument(leftTokenValue.length == rightTokenValue.length, "Tokens have unequal length");
    int tokenLength = leftTokenValue.length;
    byte[][] tokens = new byte[][] { leftTokenValue, rightTokenValue };
    byte[][] plusOne = new byte[2][tokenLength];
    for (int j = 0; j < 2; j++) {
        boolean carry = true;
        for (int i = tokenLength - 1; i >= 0; i--) {
            byte b = tokens[j][i];
            if (carry) {
                b++;
                carry = b == 0;
            }
            plusOne[j][i] = b;
        }
    }
    StaticBuffer lb = StaticArrayBuffer.of(plusOne[0]);
    StaticBuffer rb = StaticArrayBuffer.of(plusOne[1]);
    Preconditions.checkArgument(lb.length() == tokenLength, lb.length());
    Preconditions.checkArgument(rb.length() == tokenLength, rb.length());
    return new KeyRange(lb, rb);
}
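Both token values receive the same big-endian +1 with carry, turning the ring's exclusive-left/inclusive-right interval into the inclusive-start form that KeyRange expects. A standalone check of the carry loop, extracted from the method above (the input bytes are arbitrary):

import java.util.Arrays;

class PlusOneSketch {

    // Big-endian increment with carry, as in transformRange above.
    static byte[] plusOne(byte[] token) {
        byte[] out = new byte[token.length];
        boolean carry = true;
        for (int i = token.length - 1; i >= 0; i--) {
            byte b = token[i];
            if (carry) {
                b++;
                carry = b == 0;  // the carry propagates only when the byte wrapped
            }
            out[i] = b;
        }
        return out;
    }

    public static void main(String[] args) {
        // Prints [1, 0]: 0xFF wraps to 0 and carries into the next byte up.
        System.out.println(Arrays.toString(plusOne(new byte[] { 0, (byte) 0xFF })));
    }
}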