Use of org.apache.cassandra.dht.Token in project cassandra by apache.
From the class ReplicationAwareTokenAllocator, the method populateTokenInfoAndAdjustUnit:
private void populateTokenInfoAndAdjustUnit(TokenInfo<Unit> populate, GroupInfo newUnitGroup) {
    Token replicationStart = populateTokenInfo(populate, newUnitGroup);
    double newOwnership = replicationStart.size(populate.token);
    double oldOwnership = populate.replicatedOwnership;
    populate.replicatedOwnership = newOwnership;
    populate.owningUnit.ownership += newOwnership - oldOwnership;
}
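The ownership figures above are ring fractions: Token.size(next) returns the portion of the token ring covered by the range from this token up to next, so populateTokenInfoAndAdjustUnit swaps in the newly computed fraction for the token and applies only the difference to its owning unit. A minimal sketch of what size returns, assuming the Murmur3 partitioner and a hand-picked token value (both illustrative assumptions, not taken from the snippet above):

IPartitioner partitioner = Murmur3Partitioner.instance;
Token.TokenFactory factory = partitioner.getTokenFactory();
Token a = factory.fromString("0");
Token b = factory.fromString("4611686018427387904"); // 2^62; assumed to sit a quarter of the way around the Murmur3 ring
double fraction = a.size(b); // roughly 0.25, the fraction of the ring between a and b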
Use of org.apache.cassandra.dht.Token in project cassandra by apache.
From the class TokenAllocation, the method addOwnership:
static void addOwnership(final TokenMetadata tokenMetadata, final AbstractReplicationStrategy rs, Token current, Token next, Map<InetAddress, Double> ownership) {
    double size = current.size(next);
    Token representative = current.getPartitioner().midpoint(current, next);
    for (InetAddress n : rs.calculateNaturalEndpoints(representative, tokenMetadata)) {
        Double v = ownership.get(n);
        ownership.put(n, v != null ? v + size : size);
    }
}
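addOwnership accounts for one ring segment at a time: it sizes the range from current to next, takes the segment's midpoint as a representative token, and credits that fraction to every endpoint that would naturally replicate the representative. The get/put accumulation could equivalently be written as ownership.merge(n, size, Double::sum). Below is a hedged sketch of how the method might be driven over a whole ring; the sortedTokens-based iteration and the wrap-around handling are assumptions for illustration, not code from the Cassandra source:

Map<InetAddress, Double> ownership = new HashMap<>();
List<Token> sortedTokens = tokenMetadata.sortedTokens();
for (int i = 0; i < sortedTokens.size(); i++) {
    Token current = sortedTokens.get(i);
    Token next = sortedTokens.get((i + 1) % sortedTokens.size()); // wrap around at the end of the ring
    addOwnership(tokenMetadata, rs, current, next, ownership);
}
// each endpoint now maps to the total fraction of the ring it replicates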
Use of org.apache.cassandra.dht.Token in project cassandra by apache.
From the class RepairOption, the method parse:
/**
* Construct RepairOptions object from given map of Strings.
* <p>
* Available options are:
*
* <table>
* <caption>Repair Options</caption>
* <thead>
* <tr>
* <th>key</th>
* <th>value</th>
* <th>default (when key not given)</th>
* </tr>
* </thead>
* <tbody>
* <tr>
* <td>parallelism</td>
* <td>"sequential", "parallel" or "dc_parallel"</td>
* <td>"sequential"</td>
* </tr>
* <tr>
* <td>primaryRange</td>
* <td>"true" if perform repair only on primary range.</td>
* <td>false</td>
* </tr>
* <tr>
* <td>incremental</td>
* <td>"true" if perform incremental repair.</td>
* <td>false</td>
* </tr>
* <tr>
* <td>trace</td>
* <td>"true" if repair is traced.</td>
* <td>false</td>
* </tr>
* <tr>
* <td>jobThreads</td>
* <td>Number of threads to use to run repair job.</td>
* <td>1</td>
* </tr>
* <tr>
* <td>ranges</td>
 * <td>Ranges to repair. A range is expressed as <start token>:<end token>
 * and multiple ranges can be given as comma-separated values (e.g. aaa:bbb,ccc:ddd).</td>
* <td></td>
* </tr>
* <tr>
* <td>columnFamilies</td>
 * <td>Specify names of ColumnFamilies to repair.
 * Multiple ColumnFamilies can be given as comma-separated values (e.g. cf1,cf2,cf3).</td>
* <td></td>
* </tr>
* <tr>
* <td>dataCenters</td>
 * <td>Specify names of data centers that participate in this repair.
 * Multiple data centers can be given as comma-separated values (e.g. dc1,dc2,dc3).</td>
* <td></td>
* </tr>
* <tr>
* <td>hosts</td>
 * <td>Specify names of hosts that participate in this repair.
 * Multiple hosts can be given as comma-separated values (e.g. cass1,cass2).</td>
* <td></td>
* </tr>
* <tr>
* <td>pullRepair</td>
* <td>"true" if the repair should only stream data one way from a remote host to this host.
* This is only allowed if exactly 2 hosts are specified along with a token range that they share.</td>
* <td>false</td>
* </tr>
* </tbody>
* </table>
*
* @param options options to parse
* @param partitioner partitioner is used to construct token ranges
* @return RepairOptions object
*/
public static RepairOption parse(Map<String, String> options, IPartitioner partitioner) {
    // if no parallel option is given, then this will be "sequential" by default.
    RepairParallelism parallelism = RepairParallelism.fromName(options.get(PARALLELISM_KEY));
    boolean primaryRange = Boolean.parseBoolean(options.get(PRIMARY_RANGE_KEY));
    boolean incremental = Boolean.parseBoolean(options.get(INCREMENTAL_KEY));
    boolean trace = Boolean.parseBoolean(options.get(TRACE_KEY));
    boolean pullRepair = Boolean.parseBoolean(options.get(PULL_REPAIR_KEY));
    int jobThreads = 1;
    if (options.containsKey(JOB_THREADS_KEY)) {
        try {
            jobThreads = Integer.parseInt(options.get(JOB_THREADS_KEY));
        } catch (NumberFormatException ignore) {
        }
    }
    // ranges
    String rangesStr = options.get(RANGES_KEY);
    Set<Range<Token>> ranges = new HashSet<>();
    if (rangesStr != null) {
        if (incremental)
            logger.warn("Incremental repair can't be requested with subrange repair " +
                        "because each subrange repair would generate an anti-compacted table. " +
                        "The repair will occur but without anti-compaction.");
        StringTokenizer tokenizer = new StringTokenizer(rangesStr, ",");
        while (tokenizer.hasMoreTokens()) {
            String[] rangeStr = tokenizer.nextToken().split(":", 2);
            if (rangeStr.length < 2) {
                continue;
            }
            Token parsedBeginToken = partitioner.getTokenFactory().fromString(rangeStr[0].trim());
            Token parsedEndToken = partitioner.getTokenFactory().fromString(rangeStr[1].trim());
            ranges.add(new Range<>(parsedBeginToken, parsedEndToken));
        }
    }
    RepairOption option = new RepairOption(parallelism, primaryRange, incremental, trace, jobThreads, ranges, !ranges.isEmpty(), pullRepair);
    // data centers
    String dataCentersStr = options.get(DATACENTERS_KEY);
    Collection<String> dataCenters = new HashSet<>();
    if (dataCentersStr != null) {
        StringTokenizer tokenizer = new StringTokenizer(dataCentersStr, ",");
        while (tokenizer.hasMoreTokens()) {
            dataCenters.add(tokenizer.nextToken().trim());
        }
        option.getDataCenters().addAll(dataCenters);
    }
    // hosts
    String hostsStr = options.get(HOSTS_KEY);
    Collection<String> hosts = new HashSet<>();
    if (hostsStr != null) {
        StringTokenizer tokenizer = new StringTokenizer(hostsStr, ",");
        while (tokenizer.hasMoreTokens()) {
            hosts.add(tokenizer.nextToken().trim());
        }
        option.getHosts().addAll(hosts);
    }
    // columnfamilies
    String cfStr = options.get(COLUMNFAMILIES_KEY);
    if (cfStr != null) {
        Collection<String> columnFamilies = new HashSet<>();
        StringTokenizer tokenizer = new StringTokenizer(cfStr, ",");
        while (tokenizer.hasMoreTokens()) {
            columnFamilies.add(tokenizer.nextToken().trim());
        }
        option.getColumnFamilies().addAll(columnFamilies);
    }
    // validate options
    if (jobThreads > MAX_JOB_THREADS) {
        throw new IllegalArgumentException("Too many job threads. Max is " + MAX_JOB_THREADS);
    }
    if (!dataCenters.isEmpty() && !hosts.isEmpty()) {
        throw new IllegalArgumentException("Cannot combine -dc and -hosts options.");
    }
    if (primaryRange && ((!dataCenters.isEmpty() && !option.isInLocalDCOnly()) || !hosts.isEmpty())) {
        throw new IllegalArgumentException("You need to run primary range repair on all nodes in the cluster.");
    }
    if (pullRepair) {
        if (hosts.size() != 2) {
            throw new IllegalArgumentException("Pull repair can only be performed between two hosts. Please specify two hosts, one of which must be this host.");
        } else if (ranges.isEmpty()) {
            throw new IllegalArgumentException("Token ranges must be specified when performing pull repair. Please specify at least one token range which both hosts have in common.");
        }
    }
    if (option.isIncremental() && !option.isGlobal()) {
        throw new IllegalArgumentException("Incremental repairs cannot be run against a subset of tokens or ranges");
    }
    return option;
}
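A minimal usage sketch for parse, assuming the option keys are the literal strings listed in the table above and that Murmur3Partitioner is the active partitioner (both are illustrative assumptions; key constants such as PARALLELISM_KEY are not shown in this snippet):

Map<String, String> opts = new HashMap<>();
opts.put("parallelism", "parallel");
opts.put("jobThreads", "2");
opts.put("ranges", "0:100,200:300"); // <start token>:<end token>, comma separated
RepairOption option = RepairOption.parse(opts, Murmur3Partitioner.instance);
// keys that are not supplied fall back to the defaults in the table, e.g. primaryRange, incremental, trace and pullRepair all parse to false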
Use of org.apache.cassandra.dht.Token in project cassandra by apache.
From the class MerkleTreeTest, the method testSerialization:
@Test
public void testSerialization() throws Exception {
    Range<Token> full = new Range<>(tok(-1), tok(-1));
    // populate and validate the tree
    mt.maxsize(256);
    mt.init();
    for (TreeRange range : mt.invalids())
        range.addAll(new HIterator(range.right));
    byte[] initialhash = mt.hash(full);
    DataOutputBuffer out = new DataOutputBuffer();
    MerkleTree.serializer.serialize(mt, out, MessagingService.current_version);
    byte[] serialized = out.toByteArray();
    DataInputPlus in = new DataInputBuffer(serialized);
    MerkleTree restored = MerkleTree.serializer.deserialize(in, MessagingService.current_version);
    assertHashEquals(initialhash, restored.hash(full));
}
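The test follows the usual serialize/deserialize round-trip pattern. Below is a generic helper sketched from it, under the assumption that the serializer follows the standard IVersionedSerializer contract (serialize, deserialize, serializedSize); it is illustrative rather than part of MerkleTreeTest:

static <T> T roundTrip(IVersionedSerializer<T> serializer, T value, int version) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    serializer.serialize(value, out, version);
    byte[] bytes = out.toByteArray();
    // the declared serialized size should agree with what was actually written
    assert serializer.serializedSize(value, version) == bytes.length;
    return serializer.deserialize(new DataInputBuffer(bytes), version);
}

If MerkleTree.serializer satisfies that contract, as the calls above suggest, the body of the test reduces to a roundTrip call followed by the hash comparison.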
Use of org.apache.cassandra.dht.Token in project cassandra by apache.
From the class MerkleTreeTest, the method testValidateTree:
/**
* Generate two trees with different splits, but containing the same keys, and
* check that they compare equally.
*
* The set of keys used in this test is: #{2,4,6,8,12,14,0}
*/
@Test
public void testValidateTree() {
    // this test needs slightly more resolution
    TOKEN_SCALE = new BigInteger("16");
    Range<Token> full = new Range<>(tok(-1), tok(-1));
    Iterator<TreeRange> ranges;
    MerkleTree mt2 = new MerkleTree(partitioner, fullRange(), RECOMMENDED_DEPTH, Integer.MAX_VALUE);
    mt.split(tok(8));
    mt.split(tok(4));
    mt.split(tok(12));
    mt.split(tok(6));
    mt.split(tok(10));
    ranges = mt.invalids();
    // (-1,4]: depth 2
    ranges.next().addAll(new HIterator(2, 4));
    // (4,6]
    ranges.next().addAll(new HIterator(6));
    // (6,8]
    ranges.next().addAll(new HIterator(8));
    // (8,10]
    ranges.next().addAll(new HIterator(/*empty*/ new int[0]));
    // (10,12]
    ranges.next().addAll(new HIterator(12));
    // (12,-1]: depth 2
    ranges.next().addAll(new HIterator(14, -1));
    mt2.split(tok(8));
    mt2.split(tok(4));
    mt2.split(tok(12));
    mt2.split(tok(2));
    mt2.split(tok(10));
    mt2.split(tok(9));
    mt2.split(tok(11));
    ranges = mt2.invalids();
    // (-1,2]
    ranges.next().addAll(new HIterator(2));
    // (2,4]
    ranges.next().addAll(new HIterator(4));
    // (4,8]: depth 2
    ranges.next().addAll(new HIterator(6, 8));
    // (8,9]
    ranges.next().addAll(new HIterator(/*empty*/ new int[0]));
    // (9,10]
    ranges.next().addAll(new HIterator(/*empty*/ new int[0]));
    // (10,11]: depth 4
    ranges.next().addAll(new HIterator(/*empty*/ new int[0]));
    // (11,12]: depth 4
    ranges.next().addAll(new HIterator(12));
    // (12,-1]: depth 2
    ranges.next().addAll(new HIterator(14, -1));
    byte[] mthash = mt.hash(full);
    byte[] mt2hash = mt2.hash(full);
    assertHashEquals("Tree hashes did not match: " + mt + " && " + mt2, mthash, mt2hash);
}