Use of org.apache.cassandra.dht.Range in the Apache Cassandra project: class OldNetworkTopologyStrategyTest, method testMoveMiddleOfRing.
@SuppressWarnings("unchecked")
@Test
public void testMoveMiddleOfRing() throws UnknownHostException {
    // A node moving to another position in the middle of the ring should
    // stream all of its current data and fetch all of its new data.
    int movingNodeIdx = 1;
    int movingNodeIdxAfterMove = 4;
    BigIntegerToken newToken = new BigIntegerToken("90070591730234615865843651857942052864");
    BigIntegerToken[] tokens = initTokens();
    BigIntegerToken[] tokensAfterMove = initTokensAfterMove(tokens, movingNodeIdx, newToken);
    Pair<Set<Range<Token>>, Set<Range<Token>>> ranges = calculateStreamAndFetchRanges(tokens, tokensAfterMove, movingNodeIdx);

    // Sort the computed ranges so they can be compared deterministically.
    Range<Token>[] toStream = ranges.left.toArray(new Range[0]);
    Range<Token>[] toFetch = ranges.right.toArray(new Range[0]);
    Arrays.sort(toStream);
    Arrays.sort(toFetch);

    // Build the expected ranges: the node streams the two ranges it owned at
    // its old position, and fetches the two ranges covering its new position.
    Range<Token>[] toStreamExpected = new Range[2];
    toStreamExpected[0] = new Range<Token>(getToken(movingNodeIdx - 2, tokens), getToken(movingNodeIdx - 1, tokens));
    toStreamExpected[1] = new Range<Token>(getToken(movingNodeIdx - 1, tokens), getToken(movingNodeIdx, tokens));
    Arrays.sort(toStreamExpected);
    Range<Token>[] toFetchExpected = new Range[2];
    toFetchExpected[0] = new Range<Token>(getToken(movingNodeIdxAfterMove - 1, tokens), getToken(movingNodeIdxAfterMove, tokens));
    toFetchExpected[1] = new Range<Token>(getToken(movingNodeIdxAfterMove, tokensAfterMove), getToken(movingNodeIdx, tokensAfterMove));
    Arrays.sort(toFetchExpected);

    // JUnit convention: expected value first, actual second, so a failure
    // message reads correctly.
    assertEquals(true, Arrays.equals(toStream, toStreamExpected));
    assertEquals(true, Arrays.equals(toFetch, toFetchExpected));
}
Use of org.apache.cassandra.dht.Range in the Apache Cassandra project: class OldNetworkTopologyStrategyTest, method testMoveAfterNextNeighbors.
@SuppressWarnings("unchecked")
@Test
public void testMoveAfterNextNeighbors() throws UnknownHostException {
    // A node moving just past its next neighbor in the ring streams one range
    // and fetches two.
    int movingNodeIdx = 1;
    int movingNodeIdxAfterMove = 2;
    BigIntegerToken newToken = new BigIntegerToken("52535295865117307932921825928971026432");
    BigIntegerToken[] tokens = initTokens();
    BigIntegerToken[] tokensAfterMove = initTokensAfterMove(tokens, movingNodeIdx, newToken);
    Pair<Set<Range<Token>>, Set<Range<Token>>> ranges = calculateStreamAndFetchRanges(tokens, tokensAfterMove, movingNodeIdx);

    // Sort the computed ranges so they can be compared deterministically.
    Range<Token>[] toStream = ranges.left.toArray(new Range[0]);
    Range<Token>[] toFetch = ranges.right.toArray(new Range[0]);
    Arrays.sort(toStream);
    Arrays.sort(toFetch);

    // Build the expected ranges: only the range preceding the old position is
    // streamed away; the two ranges around the new position are fetched.
    Range<Token>[] toStreamExpected = new Range[1];
    toStreamExpected[0] = new Range<Token>(getToken(movingNodeIdx - 2, tokens), getToken(movingNodeIdx - 1, tokens));
    Arrays.sort(toStreamExpected);
    Range<Token>[] toFetchExpected = new Range[2];
    toFetchExpected[0] = new Range<Token>(getToken(movingNodeIdxAfterMove - 1, tokens), getToken(movingNodeIdxAfterMove, tokens));
    toFetchExpected[1] = new Range<Token>(getToken(movingNodeIdxAfterMove, tokensAfterMove), getToken(movingNodeIdx, tokensAfterMove));
    Arrays.sort(toFetchExpected);

    // JUnit convention: expected value first, actual second, so a failure
    // message reads correctly.
    assertEquals(true, Arrays.equals(toStream, toStreamExpected));
    assertEquals(true, Arrays.equals(toFetch, toFetchExpected));
}
Use of org.apache.cassandra.dht.Range in the Apache Cassandra project: class LocalSyncTaskTest, method testDifference.
@Test
public void testDifference() throws Throwable {
    // Repair range spanning from the minimum token to a random token.
    Range<Token> repairRange = new Range<>(partirioner.getMinimumToken(), partirioner.getRandomToken());
    UUID parentSession = UUID.randomUUID();
    ColumnFamilyStore store = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
    ActiveRepairService.instance.registerParentRepairSession(parentSession, FBUtilities.getBroadcastAddress(), Arrays.asList(store), Arrays.asList(repairRange), false, ActiveRepairService.UNREPAIRED_SSTABLE, false);
    RepairJobDesc desc = new RepairJobDesc(parentSession, UUID.randomUUID(), KEYSPACE1, "Standard1", Arrays.asList(repairRange));

    MerkleTrees localTree = createInitialTree(desc);
    MerkleTrees remoteTree = createInitialTree(desc);

    // Invalidate one leaf in the first tree and give it a different hash so
    // the two trees disagree on exactly one range.
    Token midToken = partirioner.midpoint(repairRange.left, repairRange.right);
    localTree.invalidate(midToken);
    MerkleTree.TreeRange mismatched = localTree.get(midToken);
    mismatched.hash("non-empty hash!".getBytes());
    Set<Range<Token>> expectedDiffs = new HashSet<>();
    expectedDiffs.add(mismatched);

    // Difference the trees. Note: we reuse the same endpoint which is bogus
    // in theory but fine here.
    TreeResponse r1 = new TreeResponse(InetAddress.getByName("127.0.0.1"), localTree);
    TreeResponse r2 = new TreeResponse(InetAddress.getByName("127.0.0.2"), remoteTree);
    LocalSyncTask task = new LocalSyncTask(desc, r1, r2, ActiveRepairService.UNREPAIRED_SSTABLE, null, false);
    task.run();

    // Ensure that the changed range was recorded.
    assertEquals("Wrong differing ranges", expectedDiffs.size(), task.getCurrentStat().numberOfDifferences);
}
Use of org.apache.cassandra.dht.Range in the Apache Cassandra project: class PendingAntiCompactionTest, method acquisitionSuccess.
@Test
public void acquisitionSuccess() throws Exception {
    cfs.disableAutoCompaction();
    makeSSTables(6);
    List<SSTableReader> live = new ArrayList<>(cfs.getLiveSSTables());
    List<SSTableReader> wanted = live.subList(0, 3);

    // Build one token range per sstable we expect to be acquired, covering
    // exactly the first three of the six created sstables.
    Collection<Range<Token>> tokenRanges = new HashSet<>();
    for (SSTableReader reader : wanted) {
        tokenRanges.add(new Range<>(reader.first.getToken(), reader.last.getToken()));
    }

    PendingAntiCompaction.AcquisitionCallable acquisitionCallable = new PendingAntiCompaction.AcquisitionCallable(cfs, tokenRanges, UUIDGen.getTimeUUID());
    logger.info("SSTables: {}", live);
    logger.info("Expected: {}", wanted);
    PendingAntiCompaction.AcquireResult acquired = acquisitionCallable.call();
    Assert.assertNotNull(acquired);
    logger.info("Originals: {}", acquired.txn.originals());

    // The acquired transaction must hold exactly the three expected sstables
    // and still be in progress until we abort it.
    Assert.assertEquals(3, acquired.txn.originals().size());
    for (SSTableReader reader : wanted) {
        logger.info("Checking {}", reader);
        Assert.assertTrue(acquired.txn.originals().contains(reader));
    }
    Assert.assertEquals(Transactional.AbstractTransactional.State.IN_PROGRESS, acquired.txn.state());
    acquired.abort();
}
Use of org.apache.cassandra.dht.Range in the Apache Cassandra project: class RepairOptionTest, method testParseOptions.
@Test
public void testParseOptions() {
    IPartitioner partitioner = Murmur3Partitioner.instance;
    Token.TokenFactory tokenFactory = partitioner.getTokenFactory();

    // Parse with empty options: defaults apply.
    RepairOption option = RepairOption.parse(new HashMap<String, String>(), partitioner);
    // On Windows with a non-standard disk/index access mode the test expects
    // parallelism to come back PARALLEL instead of the SEQUENTIAL default.
    // assertEquals(expected, actual) gives a readable message on failure,
    // unlike assertTrue on an == comparison.
    if (FBUtilities.isWindows && (DatabaseDescriptor.getDiskAccessMode() != Config.DiskAccessMode.standard || DatabaseDescriptor.getIndexAccessMode() != Config.DiskAccessMode.standard))
        assertEquals(RepairParallelism.PARALLEL, option.getParallelism());
    else
        assertEquals(RepairParallelism.SEQUENTIAL, option.getParallelism());
    assertFalse(option.isPrimaryRange());
    assertFalse(option.isIncremental());

    // Parse everything except hosts (hosts cannot be combined with data centers).
    Map<String, String> options = new HashMap<>();
    options.put(RepairOption.PARALLELISM_KEY, "parallel");
    options.put(RepairOption.PRIMARY_RANGE_KEY, "false");
    options.put(RepairOption.INCREMENTAL_KEY, "false");
    options.put(RepairOption.RANGES_KEY, "0:10,11:20,21:30");
    options.put(RepairOption.COLUMNFAMILIES_KEY, "cf1,cf2,cf3");
    options.put(RepairOption.DATACENTERS_KEY, "dc1,dc2,dc3");
    option = RepairOption.parse(options, partitioner);
    assertEquals(RepairParallelism.PARALLEL, option.getParallelism());
    assertFalse(option.isPrimaryRange());
    assertFalse(option.isIncremental());

    // Each "start:end" entry in RANGES_KEY becomes one token range.
    Set<Range<Token>> expectedRanges = new HashSet<>(3);
    expectedRanges.add(new Range<>(tokenFactory.fromString("0"), tokenFactory.fromString("10")));
    expectedRanges.add(new Range<>(tokenFactory.fromString("11"), tokenFactory.fromString("20")));
    expectedRanges.add(new Range<>(tokenFactory.fromString("21"), tokenFactory.fromString("30")));
    assertEquals(expectedRanges, option.getRanges());

    Set<String> expectedCFs = new HashSet<>(3);
    expectedCFs.add("cf1");
    expectedCFs.add("cf2");
    expectedCFs.add("cf3");
    assertEquals(expectedCFs, option.getColumnFamilies());

    Set<String> expectedDCs = new HashSet<>(3);
    expectedDCs.add("dc1");
    expectedDCs.add("dc2");
    expectedDCs.add("dc3");
    assertEquals(expectedDCs, option.getDataCenters());

    // Expect an error when parsing with hosts as well.
    options.put(RepairOption.HOSTS_KEY, "127.0.0.1,127.0.0.2,127.0.0.3");
    assertParseThrowsIllegalArgumentExceptionWithMessage(options, "Cannot combine -dc and -hosts options");

    // Remove data centers to proceed with testing parsing hosts.
    options.remove(RepairOption.DATACENTERS_KEY);
    option = RepairOption.parse(options, partitioner);
    Set<String> expectedHosts = new HashSet<>(3);
    expectedHosts.add("127.0.0.1");
    expectedHosts.add("127.0.0.2");
    expectedHosts.add("127.0.0.3");
    assertEquals(expectedHosts, option.getHosts());
}
Aggregations