Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class KryoUtil, method toKryo:
public static byte[] toKryo(Object value) {
    try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
         Output output = new Output(bos, 256)) {
        kryo().writeObject(output, value);
        output.flush();
        return bos.toByteArray();
    } catch (IOException e) {
        throw new BackendException("Failed to serialize: %s", e, value);
    }
}
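The Output class here is Kryo's, and serialization has a symmetric deserialization step not shown in this snippet. A minimal, self-contained round trip against the plain com.esotericsoftware.kryo API (not HugeGraph's KryoUtil, and without the BackendException wrapping) might look like this sketch:

import java.io.ByteArrayOutputStream;

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

public class KryoRoundTrip {

    public static void main(String[] args) {
        Kryo kryo = new Kryo();

        // Serialize, mirroring toKryo(): stream -> Output -> writeObject
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (Output output = new Output(bos, 256)) {
            kryo.writeObject(output, "hello");
            output.flush();
        }
        byte[] bytes = bos.toByteArray();

        // Deserialize the same bytes back
        try (Input input = new Input(bytes)) {
            System.out.println(kryo.readObject(input, String.class));
        }
    }
}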
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class AbstractTransaction, method query:
@Watched(prefix = "tx")
public QueryResults<BackendEntry> query(Query query) {
    LOG.debug("Transaction query: {}", query);
    /*
     * NOTE: it's dangerous if an IdQuery/ConditionQuery is empty
     * check if the query is empty and its class is not the Query itself
     */
    if (query.empty() && !query.getClass().equals(Query.class)) {
        throw new BackendException("Query without any id or condition");
    }

    Query squery = this.serializer.writeQuery(query);

    // Do rate limit if needed
    RateLimiter rateLimiter = this.graph.readRateLimiter();
    if (rateLimiter != null && query.resultType().isGraph()) {
        double time = rateLimiter.acquire(1);
        if (time > 0) {
            LOG.debug("Waited for {}s to query", time);
        }
        BackendEntryIterator.checkInterrupted();
    }

    this.beforeRead();
    try {
        this.injectOlapPkIfNeeded(squery);
        return new QueryResults<>(this.store.query(squery), query);
    } finally {
        // TODO: not complete the iteration currently
        this.afterRead();
    }
}
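The rate-limit branch appears to rely on Guava's RateLimiter: acquire(1) blocks until a permit is available and returns the time spent waiting in seconds, which is what the debug log above reports. A minimal sketch of that behavior in isolation (the 2-permits-per-second rate is an arbitrary assumption):

import com.google.common.util.concurrent.RateLimiter;

public class ReadRateLimitDemo {

    public static void main(String[] args) {
        // Arbitrary rate for illustration: 2 read permits per second
        RateLimiter limiter = RateLimiter.create(2.0);
        for (int i = 0; i < 5; i++) {
            // Blocks until a permit is free; returns seconds spent waiting
            double waited = limiter.acquire(1);
            if (waited > 0) {
                System.out.printf("Waited for %.3fs to query (#%d)%n", waited, i);
            }
        }
    }
}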
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class StoreStateMachine, method onApplyFollower:
private Future<?> onApplyFollower(ByteBuffer data) {
    // Follower needs to read mutation data
    byte[] bytes = data.array();
    // Let the backend thread do it directly
    return this.context.backendExecutor().submit(() -> {
        BytesBuffer buffer = LZ4Util.decompress(bytes,
                                                RaftSharedContext.BLOCK_SIZE);
        buffer.forReadWritten();
        StoreType type = StoreType.valueOf(buffer.read());
        StoreAction action = StoreAction.valueOf(buffer.read());
        try {
            return this.applyCommand(type, action, buffer, false);
        } catch (Throwable e) {
            String title = "Failed to execute backend command";
            LOG.error("{}: {}", title, action, e);
            throw new BackendException(title, e);
        }
    });
}
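After decompression, the follower reads a small header (one store-type byte, then one action byte) before the mutation payload. A self-contained sketch of that kind of framing using plain java.nio.ByteBuffer instead of HugeGraph's BytesBuffer, leaving out the LZ4 step and using made-up type/action codes:

import java.nio.ByteBuffer;

public class CommandHeaderDemo {

    public static void main(String[] args) {
        // Encode: one store-type byte, one action byte, then the payload
        byte[] payload = {1, 2, 3};
        ByteBuffer out = ByteBuffer.allocate(2 + payload.length);
        out.put((byte) 0);  // made-up store type code
        out.put((byte) 4);  // made-up action code
        out.put(payload);
        byte[] bytes = out.array();

        // Decode, mirroring the follower's read order: type, action, payload
        ByteBuffer in = ByteBuffer.wrap(bytes);
        int type = in.get();
        int action = in.get();
        byte[] body = new byte[in.remaining()];
        in.get(body);
        System.out.printf("type=%d action=%d payloadLength=%d%n",
                          type, action, body.length);
    }
}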
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class CassandraShard, method getSplits:
/**
 * Get the splits of a table in the specified range.
 * NOTE: maybe we don't need this method
 * @param start           the start of the range
 * @param end             the end of the range
 * @param splitPartitions the expected number of partitions per split
 * @param splitSize       the expected size (in bytes) per split;
 *                        splitPartitions is ignored if splitSize is passed
 * @return a list of Shard
 */
public List<Shard> getSplits(String start, String end,
                             int splitPartitions, int splitSize) {
    ExecutorService executor = new ThreadPoolExecutor(
                               0, 128, 60L, TimeUnit.SECONDS,
                               new LinkedBlockingQueue<Runnable>());
    List<Shard> splits = new ArrayList<>();
    try {
        List<Future<List<Shard>>> futures = new ArrayList<>();
        TokenFactory tokenFactory = this.partitioner.getTokenFactory();
        TokenRange tokenRange = rangeToTokenRange(new Range<>(
                                tokenFactory.fromString(start),
                                tokenFactory.fromString(end)));
        // Canonical ranges and nodes holding replicas
        Map<TokenRange, Set<Host>> masterRangeNodes = getRangeMap();
        for (TokenRange range : masterRangeNodes.keySet()) {
            for (TokenRange r : range.intersectWith(tokenRange)) {
                /*
                 * For each tokenRange, pick a live owner and ask it
                 * to compute bite-sized splits
                 */
                futures.add(executor.submit(
                            new SplitCallable(r, splitPartitions, splitSize)));
            }
        }
        // Wait until we have all the results back
        for (Future<List<Shard>> future : futures) {
            try {
                splits.addAll(future.get());
            } catch (Exception e) {
                throw new BackendException("Can't get cassandra shards", e);
            }
        }
        assert splits.size() >= masterRangeNodes.size();
    } finally {
        executor.shutdownNow();
    }
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class CassandraStore, method commitTx:
@Override
public void commitTx() {
    this.checkOpened();

    CassandraSessionPool.Session session = this.sessions.session();
    if (session.txState() != TxState.BEGIN) {
        LOG.warn("Store {} expect state BEGIN than {} when commit()",
                 this.store, session.txState());
    }

    if (!session.hasChanges()) {
        session.txState(TxState.CLEAN);
        LOG.debug("Store {} has nothing to commit", this.store);
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Store {} commit {} statements: {}", this.store,
                  session.statements().size(), session.statements());
    }

    // TODO how to implement tx perfectly?

    // Do update
    session.txState(TxState.COMMITTING);
    try {
        session.commit();
        session.txState(TxState.CLEAN);
    } catch (DriverException e) {
        session.txState(TxState.COMMITT_FAIL);
        LOG.error("Failed to commit statements due to:", e);
        assert session.statements().size() > 0;
        throw new BackendException(
                  "Failed to commit %s statements: '%s'...", e,
                  session.statements().size(),
                  session.statements().iterator().next());
    }
}
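The COMMITTING/CLEAN/COMMITT_FAIL transitions above are one half of a commit-or-rollback pattern; the begin and rollback counterparts are not shown in this snippet. A self-contained sketch of the caller-side pattern, using a hypothetical minimal Store interface rather than HugeGraph's actual store API:

// Hypothetical minimal interface for illustration only
interface Store {
    void beginTx();
    void commitTx();
    void rollbackTx();
}

public class TxTemplate {

    // Run a mutation inside a transaction, rolling back on failure
    public static void runInTx(Store store, Runnable mutation) {
        store.beginTx();
        try {
            mutation.run();
            store.commitTx();   // may fail, as in the DriverException path above
        } catch (RuntimeException e) {
            store.rollbackTx(); // discard the partial changes before rethrowing
            throw e;
        }
    }
}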