Search in sources:

Example 1 with Token

Use of com.datastax.driver.core.Token in project cassandra by apache.

The class TokenRangeQuery, method buildQuery.

/**
 * Builds the CQL SELECT statement covering the given token range.
 *
 * <p>The projection always starts with {@code token(pk1, pk2, ...)} so that
 * callers can count partitions from the result set. The WHERE clause is
 * emitted only for the bounds that are present: an exclusive lower bound
 * ({@code token(...) > start}) and/or an inclusive upper bound
 * ({@code token(...) <= end}).
 *
 * @param tokenRange the range whose start/end tokens bound the query; either
 *                   end may be null, in which case that bound is omitted
 * @return the CQL query text for this range
 */
private String buildQuery(TokenRange tokenRange) {
    Token rangeStart = tokenRange.getStart();
    Token rangeEnd = tokenRange.getEnd();
    // token(pk1, pk2, ...) over the full partition key; reused in both the
    // projection (to count partitions) and the range predicate.
    String tokenExpr = String.format("token(%s)", tableMetadata.getPartitionKey().stream().map(ColumnMetadata::getName).collect(Collectors.joining(", ")));
    StringBuilder query = new StringBuilder("SELECT ");
    query.append(tokenExpr).append(", ").append(columns).append(" FROM ").append(tableMetadata.getName());
    if (rangeStart != null || rangeEnd != null)
        query.append(" WHERE ");
    if (rangeStart != null)
        query.append(tokenExpr).append(" > ").append(rangeStart.toString());
    if (rangeStart != null && rangeEnd != null)
        query.append(" AND ");
    if (rangeEnd != null)
        query.append(tokenExpr).append(" <= ").append(rangeEnd.toString());
    return query.toString();
}
Also used : Token(com.datastax.driver.core.Token)

Example 2 with Token

Use of com.datastax.driver.core.Token in project beam by apache.

The class ReadFn, method processElement.

/**
 * Reads rows from Cassandra for the split described by {@code read} and emits
 * each mapped entity to the receiver.
 *
 * <p>When {@code read.ringRanges()} is non-null, one bounded token-range query
 * is executed per {@link RingRange}; otherwise a single unbounded query is
 * executed. Any failure is logged and swallowed, matching the original
 * best-effort behavior — NOTE(review): this silently drops the element on
 * error; confirm that is intended.
 */
@ProcessElement
public void processElement(@Element Read<T> read, OutputReceiver<T> receiver) {
    try {
        Session session = ConnectionManager.getSession(read);
        Mapper<T> entityMapper = read.mapperFactoryFn().apply(session);
        // Comma-joined partition-key column names, required by the range query template.
        String partitionKey = session.getCluster().getMetadata().getKeyspace(read.keyspace().get()).getTable(read.table().get()).getPartitionKey().stream().map(ColumnMetadata::getName).collect(Collectors.joining(","));
        boolean hasRanges = read.ringRanges() != null;
        PreparedStatement stmt = session.prepare(generateRangeQuery(read, partitionKey, hasRanges));
        Set<RingRange> ranges = hasRanges ? read.ringRanges().get() : Collections.emptySet();
        for (RingRange range : ranges) {
            Token lowerBound = session.getCluster().getMetadata().newToken(range.getStart().toString());
            Token upperBound = session.getCluster().getMetadata().newToken(range.getEnd().toString());
            ResultSet rows = session.execute(stmt.bind().setToken(0, lowerBound).setToken(1, upperBound));
            for (Iterator<T> it = entityMapper.map(rows); it.hasNext(); ) {
                receiver.output(it.next());
            }
        }
        if (!hasRanges) {
            ResultSet rows = session.execute(stmt.bind());
            for (Iterator<T> it = entityMapper.map(rows); it.hasNext(); ) {
                receiver.output(it.next());
            }
        }
    } catch (Exception ex) {
        LOG.error("error", ex);
    }
}
Also used : ResultSet(com.datastax.driver.core.ResultSet) PreparedStatement(com.datastax.driver.core.PreparedStatement) Token(com.datastax.driver.core.Token) Session(com.datastax.driver.core.Session)

Example 3 with Token

Use of com.datastax.driver.core.Token in project cassandra by apache.

The class CqlClientHelper, method getLocalPrimaryRangeForDC.

/**
 * Computes the primary token ranges owned by hosts in {@code targetDC} and the
 * replicas for each range.
 *
 * @param keyspace the keyspace whose replication determines the replicas
 * @param metadata driver cluster metadata used to enumerate hosts and tokens
 * @param targetDC only hosts in this datacenter contribute tokens
 * @return map from each token range to the hosts replicating it
 * @throws NullPointerException if any argument is null
 * @throws IllegalStateException if two hosts claim the same token, or if no
 *         hosts are found in {@code targetDC}
 */
public static Map<TokenRange, List<Host>> getLocalPrimaryRangeForDC(String keyspace, Metadata metadata, String targetDC) {
    Objects.requireNonNull(keyspace, "keyspace");
    Objects.requireNonNull(metadata, "metadata");
    Objects.requireNonNull(targetDC, "targetDC");
    // In 2.1 the logic was to have a set of nodes used as a seed, they were used to query
    // client.describe_local_ring(keyspace) -> List<TokenRange>; this should include all nodes in the local dc.
    // TokenRange contained the endpoints in order, so .endpoints.get(0) is the primary owner
    // Client does not have a similar API, instead it returns Set<Host>.  To replicate this we first need
    // to compute the primary owners, then add in the replicas
    List<Token> tokens = new ArrayList<>();
    Map<Token, Host> tokenToHost = new HashMap<>();
    for (Host host : metadata.getAllHosts()) {
        if (!targetDC.equals(host.getDatacenter()))
            continue;
        for (Token token : host.getTokens()) {
            Host previous = tokenToHost.putIfAbsent(token, host);
            if (previous != null)
                throw new IllegalStateException("Two hosts share the same token; hosts " + host.getHostId() + ":" + host.getTokens() + ", " + previous.getHostId() + ":" + previous.getTokens());
            tokens.add(token);
        }
    }
    // A misspelled or empty datacenter would otherwise surface below as a
    // cryptic IndexOutOfBoundsException from tokens.get(tokens.size() - 1).
    if (tokens.isEmpty())
        throw new IllegalStateException("No hosts found in datacenter: " + targetDC);
    Collections.sort(tokens);
    Map<TokenRange, List<Host>> rangeToReplicas = new HashMap<>();
    // The first token in the ring uses the last token as its 'start', handle this here to simplify the loop
    Token start = tokens.get(tokens.size() - 1);
    Token end = tokens.get(0);
    addRange(keyspace, metadata, tokenToHost, rangeToReplicas, start, end);
    for (int i = 1; i < tokens.size(); i++) {
        start = tokens.get(i - 1);
        end = tokens.get(i);
        addRange(keyspace, metadata, tokenToHost, rangeToReplicas, start, end);
    }
    return rangeToReplicas;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Token(com.datastax.driver.core.Token) Host(com.datastax.driver.core.Host) TokenRange(com.datastax.driver.core.TokenRange) List(java.util.List) ArrayList(java.util.ArrayList)

Aggregations

Token (com.datastax.driver.core.Token)3 Host (com.datastax.driver.core.Host)1 PreparedStatement (com.datastax.driver.core.PreparedStatement)1 ResultSet (com.datastax.driver.core.ResultSet)1 Session (com.datastax.driver.core.Session)1 TokenRange (com.datastax.driver.core.TokenRange)1 ArrayList (java.util.ArrayList)1 HashMap (java.util.HashMap)1 List (java.util.List)1