
Example 6 with Shard

Use of com.baidu.hugegraph.backend.store.Shard in the apache/incubator-hugegraph project.

From class RangeTest, method testRangeOfOnlyOneRegion:

@Test
public void testRangeOfOnlyOneRegion() {
    // The startKey and endKey are both "" when this is the only region of the table
    Range range = new Range(START, END);
    List<Shard> shards = range.splitEven(0);
    Assert.assertEquals(1, shards.size());
    Assert.assertEquals(Strings.EMPTY, shards.get(0).start());
    Assert.assertEquals(Strings.EMPTY, shards.get(0).end());
    shards = range.splitEven(1);
    Assert.assertEquals(1, shards.size());
    Assert.assertEquals(Strings.EMPTY, shards.get(0).start());
    Assert.assertEquals(Strings.EMPTY, shards.get(0).end());
    shards = range.splitEven(3);
    Assert.assertEquals(3, shards.size());
    Assert.assertEquals(Strings.EMPTY, shards.get(0).start());
    Assert.assertEquals("VVVVVVVVVVVVVVVVVVVVVQ==", shards.get(0).end());
    Assert.assertEquals("VVVVVVVVVVVVVVVVVVVVVQ==", shards.get(1).start());
    Assert.assertEquals("qqqqqqqqqqqqqqqqqqqqqg==", shards.get(1).end());
    Assert.assertEquals("qqqqqqqqqqqqqqqqqqqqqg==", shards.get(2).start());
    Assert.assertEquals(Strings.EMPTY, shards.get(2).end());
    for (int i = 4; i < 100; i++) {
        range.splitEven(i);
    }
}
Also used : Range(com.baidu.hugegraph.backend.store.BackendTable.ShardSplitter.Range) Shard(com.baidu.hugegraph.backend.store.Shard) Test(org.junit.Test)
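
The boundary strings asserted for splitEven(3) fall out of simple arithmetic: splitting the full 16-byte key space into three equal parts puts the cut points at 1/3 and 2/3 of 2^128, i.e. the byte patterns 0x55...55 and 0xAA...AA, whose Base64 encodings are exactly the expected values above. The standalone sketch below reproduces those strings; it only illustrates the arithmetic and is not the actual ShardSplitter.Range implementation, and the class and helper names are made up.

import java.math.BigInteger;
import java.util.Base64;

public class SplitPointSketch {

    // Hypothetical helper: left-pad (or drop the sign byte of) a BigInteger
    // so that it occupies exactly `width` bytes
    private static byte[] toFixedBytes(BigInteger value, int width) {
        byte[] raw = value.toByteArray();          // may carry a leading sign byte
        byte[] fixed = new byte[width];
        int copy = Math.min(raw.length, width);
        System.arraycopy(raw, raw.length - copy, fixed, width - copy, copy);
        return fixed;
    }

    public static void main(String[] args) {
        BigInteger keySpace = BigInteger.ONE.shiftLeft(128);  // size of the 16-byte key space
        for (int i = 1; i < 3; i++) {
            BigInteger boundary = keySpace.multiply(BigInteger.valueOf(i))
                                          .divide(BigInteger.valueOf(3));
            System.out.println(Base64.getEncoder()
                                     .encodeToString(toFixedBytes(boundary, 16)));
        }
        // Prints:
        // VVVVVVVVVVVVVVVVVVVVVQ==
        // qqqqqqqqqqqqqqqqqqqqqg==
    }
}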

Example 7 with Shard

Use of com.baidu.hugegraph.backend.store.Shard in the apache/incubator-hugegraph project.

From class RangeTest, method testRangeOfRegionWithStartKey:

@Test
public void testRangeOfRegionWithStartKey() {
    byte[] start = new byte[] { -3, 0x35, 0x30, 0x30, 0x30, 0x77, 0x4e, -37, 0x31, 0x31, 0x30, 0x30, 0x30, 0x37, 0x36, 0x33 };
    Range range = new Range(start, END);
    List<Shard> shards = range.splitEven(0);
    Assert.assertEquals(1, shards.size());
    Assert.assertEquals("/TUwMDB3TtsxMTAwMDc2Mw==", shards.get(0).start());
    Assert.assertEquals(Strings.EMPTY, shards.get(0).end());
    shards = range.splitEven(1);
    Assert.assertEquals(1, shards.size());
    Assert.assertEquals("/TUwMDB3TtsxMTAwMDc2Mw==", shards.get(0).start());
    Assert.assertEquals(Strings.EMPTY, shards.get(0).end());
    shards = range.splitEven(2);
    Assert.assertEquals(2, shards.size());
    Assert.assertEquals("/TUwMDB3TtsxMTAwMDc2Mw==", shards.get(0).start());
    Assert.assertEquals("/pqYGBg7p22YmJgYGBubGQ==", shards.get(0).end());
    Assert.assertEquals("/pqYGBg7p22YmJgYGBubGQ==", shards.get(1).start());
    Assert.assertEquals(Strings.EMPTY, shards.get(1).end());
    for (int i = 3; i < 100; i++) {
        range.splitEven(i);
    }
}
Also used : Range(com.baidu.hugegraph.backend.store.BackendTable.ShardSplitter.Range) Shard(com.baidu.hugegraph.backend.store.Shard) Test(org.junit.Test)
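
Here the asserted splitEven(2) boundary is simply the midpoint between the start key (read as an unsigned 128-bit integer) and the top of the key space, Base64-encoded back to 16 bytes. The following sketch reproduces it under the assumption that the empty end key stands for 2^128; the class name is invented and this is not the actual splitter code.

import java.math.BigInteger;
import java.util.Base64;

public class MidpointSketch {

    public static void main(String[] args) {
        byte[] startKey = new byte[] {
            -3, 0x35, 0x30, 0x30, 0x30, 0x77, 0x4e, -37,
            0x31, 0x31, 0x30, 0x30, 0x30, 0x37, 0x36, 0x33
        };
        BigInteger lower = new BigInteger(1, startKey);     // unsigned interpretation
        BigInteger upper = BigInteger.ONE.shiftLeft(128);   // "" end key ~ top of key space
        BigInteger mid = lower.add(upper).shiftRight(1);    // (lower + upper) / 2

        byte[] raw = mid.toByteArray();                     // may include a sign byte
        byte[] bytes = new byte[16];
        System.arraycopy(raw, raw.length - 16, bytes, 0, 16);
        System.out.println(Base64.getEncoder().encodeToString(bytes));
        // Prints /pqYGBg7p22YmJgYGBubGQ== , the boundary asserted above
    }
}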

Example 8 with Shard

Use of com.baidu.hugegraph.backend.store.Shard in the apache/incubator-hugegraph project.

From class CassandraShard, method getSplits:

/**
 * Get the splits of a table.
 *
 * @param splitPartitions expected number of partitions per split
 * @param splitSize       expected size (in bytes) per split;
 *                        splitPartitions is ignored if splitSize is passed
 * @return a list of Shard
 */
public List<Shard> getSplits(long splitPartitions, long splitSize) {
    // Canonical ranges, split into pieces, fetch the splits in parallel
    ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
    List<Shard> splits = new ArrayList<>();
    try {
        List<Future<List<Shard>>> futures = new ArrayList<>();
        // Canonical ranges and nodes holding replicas
        Map<TokenRange, Set<Host>> masterRangeNodes = getRangeMap();
        for (TokenRange range : masterRangeNodes.keySet()) {
            /*
             * For each token range, pick a live owner and ask it to
             * compute bite-sized splits.
             */
            futures.add(executor.submit(new SplitCallable(range, splitPartitions, splitSize)));
        }
        // Wait until we have all the results back
        for (Future<List<Shard>> future : futures) {
            try {
                splits.addAll(future.get());
            } catch (Exception e) {
                throw new BackendException("Can't get cassandra shards", e);
            }
        }
        assert splits.size() > masterRangeNodes.size();
    } finally {
        executor.shutdownNow();
    }
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}
Also used : ResultSet(com.datastax.driver.core.ResultSet) Set(java.util.Set) ArrayList(java.util.ArrayList) BackendException(com.baidu.hugegraph.backend.BackendException) Random(java.util.Random) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) TokenRange(com.datastax.driver.core.TokenRange) List(java.util.List) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) Shard(com.baidu.hugegraph.backend.store.Shard)
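
A caller only needs the returned list; each Shard carries Base64-encoded token-range boundaries that can later be fed back through a SCAN condition (see the next example). Below is a hedged usage sketch: how the CassandraShard instance is constructed is not shown in the snippet above and is assumed here, as is its import path.

import java.util.List;

import com.baidu.hugegraph.backend.store.Shard;
import com.baidu.hugegraph.backend.store.cassandra.CassandraShard;

public class GetSplitsUsageSketch {

    // `cassandraShard` is assumed to be an already-constructed CassandraShard
    public static void printSplits(CassandraShard cassandraShard) {
        // Per the Javadoc above, splitSize takes precedence over splitPartitions,
        // so this asks for splits of roughly 64 MB each
        List<Shard> shards = cassandraShard.getSplits(4, 64 * 1024 * 1024);
        for (Shard shard : shards) {
            System.out.printf("shard [%s, %s)%n", shard.start(), shard.end());
        }
    }
}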

Example 9 with Shard

Use of com.baidu.hugegraph.backend.store.Shard in the apache/incubator-hugegraph project.

From class CassandraTable, method relation2Cql:

protected Clause relation2Cql(Relation relation) {
    String key = relation.serialKey().toString();
    Object value = relation.serialValue();
    switch(relation.relation()) {
        case EQ:
            return QueryBuilder.eq(key, value);
        case GT:
            return QueryBuilder.gt(key, value);
        case GTE:
            return QueryBuilder.gte(key, value);
        case LT:
            return QueryBuilder.lt(key, value);
        case LTE:
            return QueryBuilder.lte(key, value);
        case IN:
            return Clauses.in(key, (List<?>) value);
        case CONTAINS_VALUE:
            return QueryBuilder.contains(key, value);
        case CONTAINS_KEY:
            return QueryBuilder.containsKey(key, value);
        case SCAN:
            String[] col = pkColumnName().stream().map(pk -> formatKey(pk)).toArray(String[]::new);
            Shard shard = (Shard) value;
            Object start = QueryBuilder.raw(shard.start());
            Object end = QueryBuilder.raw(shard.end());
            return Clauses.and(QueryBuilder.gte(QueryBuilder.token(col), start), QueryBuilder.lt(QueryBuilder.token(col), end));
        // return QueryBuilder.like(key, value);
        case NEQ:
        default:
            throw new NotSupportException("relation '%s'", relation);
    }
}
Also used : Selection(com.datastax.driver.core.querybuilder.Select.Selection) QueryBuilder(com.datastax.driver.core.querybuilder.QueryBuilder) BiFunction(java.util.function.BiFunction) BackendException(com.baidu.hugegraph.backend.BackendException) Clause(com.datastax.driver.core.querybuilder.Clause) Map(java.util.Map) Query(com.baidu.hugegraph.backend.query.Query) PagingStateException(com.datastax.driver.core.exceptions.PagingStateException) IteratorUtils(org.apache.tinkerpop.gremlin.util.iterator.IteratorUtils) Create(com.datastax.driver.core.schemabuilder.Create) Delete(com.datastax.driver.core.querybuilder.Delete) ExtendableIterator(com.baidu.hugegraph.iterator.ExtendableIterator) ImmutableMap(com.google.common.collect.ImmutableMap) Collection(java.util.Collection) Set(java.util.Set) PageState(com.baidu.hugegraph.backend.page.PageState) List(java.util.List) Log(com.baidu.hugegraph.util.Log) Id(com.baidu.hugegraph.backend.id.Id) Select(com.datastax.driver.core.querybuilder.Select) Statement(com.datastax.driver.core.Statement) CopyUtil(com.baidu.hugegraph.util.CopyUtil) BackendEntry(com.baidu.hugegraph.backend.store.BackendEntry) Relation(com.baidu.hugegraph.backend.query.Condition.Relation) Order(com.baidu.hugegraph.backend.query.Query.Order) Row(com.datastax.driver.core.Row) NotFoundException(com.baidu.hugegraph.exception.NotFoundException) Function(java.util.function.Function) BackendTable(com.baidu.hugegraph.backend.store.BackendTable) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) SchemaStatement(com.datastax.driver.core.schemabuilder.SchemaStatement) ResultSet(com.datastax.driver.core.ResultSet) ImmutableList(com.google.common.collect.ImmutableList) DriverException(com.datastax.driver.core.exceptions.DriverException) Shard(com.baidu.hugegraph.backend.store.Shard) NotSupportException(com.baidu.hugegraph.exception.NotSupportException) E(com.baidu.hugegraph.util.E) SchemaBuilder(com.datastax.driver.core.schemabuilder.SchemaBuilder) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) PagingState(com.datastax.driver.core.PagingState) Condition(com.baidu.hugegraph.backend.query.Condition) Update(com.datastax.driver.core.querybuilder.Update) Insert(com.datastax.driver.core.querybuilder.Insert) HugeKeys(com.baidu.hugegraph.type.define.HugeKeys) Clauses(com.datastax.driver.core.querybuilder.Clauses) Aggregate(com.baidu.hugegraph.backend.query.Aggregate) DataType(com.datastax.driver.core.DataType) HugeType(com.baidu.hugegraph.type.HugeType) Definition(com.datastax.driver.core.ColumnDefinitions.Definition)
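
For the SCAN case, the Shard boundaries end up inside a pair of token() comparisons. The sketch below builds an equivalent clause directly with the DataStax QueryBuilder just to show the resulting CQL; the keyspace, table and column names are invented for illustration, and the real branch tokenizes all primary-key columns and joins the two bounds with the Clauses.and helper shown above.

import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;

public class ScanClauseSketch {

    public static void main(String[] args) {
        // Illustrative Murmur3 token sub-range, e.g. taken from a Shard's start()/end()
        String start = "-9223372036854775808";
        String end = "0";

        Select.Where scan = QueryBuilder.select().from("graph", "edges")
                .where(QueryBuilder.gte(QueryBuilder.token("owner_vertex"),
                                        QueryBuilder.raw(start)))
                .and(QueryBuilder.lt(QueryBuilder.token("owner_vertex"),
                                     QueryBuilder.raw(end)));
        System.out.println(scan);
        // Roughly: SELECT * FROM graph.edges
        //          WHERE token(owner_vertex)>=-9223372036854775808 AND token(owner_vertex)<0;
    }
}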

Example 10 with Shard

Use of com.baidu.hugegraph.backend.store.Shard in the apache/incubator-hugegraph project.

From class InMemoryDBTable, method queryByRange:

private Iterator<BackendEntry> queryByRange(ConditionQuery query) {
    E.checkArgument(query.relations().size() == 1, "Invalid scan with multi conditions: %s", query);
    Condition.Relation scan = query.relations().iterator().next();
    Shard shard = (Shard) scan.value();
    int start = Strings.isNullOrEmpty(shard.start()) ? 0 : Long.valueOf(shard.start()).intValue();
    int end = Strings.isNullOrEmpty(shard.end()) ? 0 : Long.valueOf(shard.end()).intValue();
    List<BackendEntry> rs = new ArrayList<>(end - start);
    Iterator<BackendEntry> iterator = this.store.values().iterator();
    int i = 0;
    while (iterator.hasNext() && i++ < end) {
        BackendEntry entry = iterator.next();
        if (i > start) {
            rs.add(entry);
        }
    }
    return rs.iterator();
}
Also used : Condition(com.baidu.hugegraph.backend.query.Condition) BackendEntry(com.baidu.hugegraph.backend.store.BackendEntry) TextBackendEntry(com.baidu.hugegraph.backend.serializer.TextBackendEntry) ArrayList(java.util.ArrayList) Shard(com.baidu.hugegraph.backend.store.Shard)
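
Note the different convention here: the in-memory backend reads shard.start() and shard.end() as plain row offsets rather than token boundaries, and keeps the entries whose 1-based position falls in (start, end]. A minimal standalone sketch of that slicing behaviour follows; the class name, helper name and sample data are made up.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class OffsetSliceSketch {

    // Mirrors the loop above: keep values whose 1-based position is in (start, end]
    public static <T> List<T> slice(Iterator<T> values, int start, int end) {
        List<T> rs = new ArrayList<>(Math.max(end - start, 0));
        int i = 0;
        while (values.hasNext() && i++ < end) {
            T value = values.next();
            if (i > start) {
                rs.add(value);
            }
        }
        return rs;
    }

    public static void main(String[] args) {
        List<String> rows = Arrays.asList("e0", "e1", "e2", "e3", "e4");
        // A shard with start="1" and end="3" keeps the 2nd and 3rd rows
        System.out.println(slice(rows.iterator(), 1, 3));   // [e1, e2]
    }
}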

Aggregations

Shard (com.baidu.hugegraph.backend.store.Shard): 15
Test (org.junit.Test): 6
HugeGraph (com.baidu.hugegraph.HugeGraph): 4
Range (com.baidu.hugegraph.backend.store.BackendTable.ShardSplitter.Range): 4
ArrayList (java.util.ArrayList): 4
BackendException (com.baidu.hugegraph.backend.BackendException): 3
Condition (com.baidu.hugegraph.backend.query.Condition): 3
Relation (com.baidu.hugegraph.backend.query.Condition.Relation): 3
NotSupportException (com.baidu.hugegraph.exception.NotSupportException): 3
ResultSet (com.datastax.driver.core.ResultSet): 3
List (java.util.List): 3
Set (java.util.Set): 3
Compress (com.baidu.hugegraph.api.filter.CompressInterceptor.Compress): 2
ConditionQuery (com.baidu.hugegraph.backend.query.ConditionQuery): 2
BackendEntry (com.baidu.hugegraph.backend.store.BackendEntry): 2
HugeKeys (com.baidu.hugegraph.type.define.HugeKeys): 2
Timed (com.codahale.metrics.annotation.Timed): 2
TokenRange (com.datastax.driver.core.TokenRange): 2
GET (jakarta.ws.rs.GET): 2
Path (jakarta.ws.rs.Path): 2