Use of org.apache.cassandra.locator.TokenMetadata in project cassandra by apache.
Class CleanupTest, method testCleanupWithNoTokenRange.
private void testCleanupWithNoTokenRange(boolean isUserDefined) throws Exception {
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    tmd.clearUnsafe();
    tmd.updateHostId(UUID.randomUUID(), InetAddressAndPort.getByName("127.0.0.1"));
    byte[] tk1 = { 2 };
    tmd.updateNormalToken(new BytesToken(tk1), InetAddressAndPort.getByName("127.0.0.1"));

    Keyspace keyspace = Keyspace.open(KEYSPACE2);
    keyspace.setMetadata(KeyspaceMetadata.create(KEYSPACE2, KeyspaceParams.nts("DC1", 1)));
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD2);

    // insert data and verify we get it back w/ range query
    fillCF(cfs, "val", LOOPS);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());

    // remove replication on DC1
    keyspace.setMetadata(KeyspaceMetadata.create(KEYSPACE2, KeyspaceParams.nts("DC1", 0)));

    // clear token range for localhost on DC1
    if (isUserDefined) {
        for (SSTableReader r : cfs.getLiveSSTables())
            CompactionManager.instance.forceUserDefinedCleanup(r.getFilename());
    } else {
        CompactionManager.instance.performCleanup(cfs, 2);
    }

    assertEquals(0, Util.getAll(Util.cmd(cfs).build()).size());
    assertTrue(cfs.getLiveSSTables().isEmpty());
}
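The fillCF helper used above is not shown in this snippet. Below is a minimal sketch of what such a helper might do, assuming the common RowUpdateBuilder test pattern; COLUMN and VALUE are hypothetical constants for the clustering key and cell value, and the real helper in CleanupTest may differ.

// Sketch only, not the actual CleanupTest helper: write `rows` partitions into the
// table and flush them so the cleanup paths above have live SSTables to operate on.
private void fillCF(ColumnFamilyStore cfs, String colName, int rows) {
    cfs.disableAutoCompaction();
    for (int i = 0; i < rows; i++) {
        new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), String.valueOf(i))
            .clustering(COLUMN)   // hypothetical clustering value
            .add(colName, VALUE)  // hypothetical cell value
            .build()
            .applyUnsafe();
    }
    cfs.forceBlockingFlush();
}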
Use of org.apache.cassandra.locator.TokenMetadata in project cassandra by apache.
Class CleanupTest, method testuserDefinedCleanupWithNewToken.
@Test
public void testuserDefinedCleanupWithNewToken() throws ExecutionException, InterruptedException, UnknownHostException {
    StorageService.instance.getTokenMetadata().clearUnsafe();

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);

    // insert data and verify we get it back w/ range query
    fillCF(cfs, "val", LOOPS);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());

    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddressAndPort.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddressAndPort.getByName("127.0.0.2"));

    for (SSTableReader r : cfs.getLiveSSTables())
        CompactionManager.instance.forceUserDefinedCleanup(r.getFilename());

    assertEquals(0, Util.getAll(Util.cmd(cfs).build()).size());
}
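With the byte-ordered partitioner implied by BytesToken, the local node (127.0.0.1, token {2}) now owns only the range ({1}, {2}], while 127.0.0.2 owns the wrapping remainder; the keys written by fillCF fall outside the local range, so user-defined cleanup drops every local row. A small debugging sketch for inspecting the ring the test just built, assuming TokenMetadata#sortedTokens() and #getEndpoint(Token) as found in current versions:

// Not part of the test: dump token ownership after the two updateNormalToken calls.
TokenMetadata tmd = StorageService.instance.getTokenMetadata();
for (Token token : tmd.sortedTokens())
    System.out.println(token + " is owned by " + tmd.getEndpoint(token));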
Use of org.apache.cassandra.locator.TokenMetadata in project cassandra by apache.
Class CleanupTest, method testCleanupWithNewToken.
@Test
public void testCleanupWithNewToken() throws ExecutionException, InterruptedException, UnknownHostException {
    StorageService.instance.getTokenMetadata().clearUnsafe();

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);

    // insert data and verify we get it back w/ range query
    fillCF(cfs, "val", LOOPS);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());

    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddressAndPort.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddressAndPort.getByName("127.0.0.2"));

    CompactionManager.instance.performCleanup(cfs, 2);

    assertEquals(0, Util.getAll(Util.cmd(cfs).build()).size());
}
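performCleanup(cfs, 2) drives the same cleanup compaction that nodetool reaches, here invoked directly with two parallel jobs. Outside a unit test, the equivalent entry point is the StorageService MBean method behind nodetool cleanup; a hedged sketch, assuming the forceKeyspaceCleanup(jobs, keyspace, tables...) overload is available in the version at hand:

// Roughly what `nodetool cleanup -j 2 <keyspace> <table>` ends up calling.
StorageService.instance.forceKeyspaceCleanup(2, KEYSPACE1, CF_STANDARD1);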
Use of org.apache.cassandra.locator.TokenMetadata in project cassandra by apache.
Class HintTest, method resetGcGraceSeconds.
@Before
public void resetGcGraceSeconds() {
    TokenMetadata tokenMeta = StorageService.instance.getTokenMetadata();
    InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
    tokenMeta.clearUnsafe();
    tokenMeta.updateHostId(UUID.randomUUID(), local);
    tokenMeta.updateNormalTokens(BootStrapper.getRandomTokens(tokenMeta, 1), local);

    for (TableMetadata table : Schema.instance.getTablesAndViews(KEYSPACE))
        MigrationManager.announceTableUpdate(table.unbuild().gcGraceSeconds(864000).build(), true);
}
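864000 seconds is the default gc_grace_seconds of ten days, so this @Before hook both rebuilds a one-node ring with a random token and restores the default grace period, preventing tests that shrink it from leaking state into each other. A hedged sketch of what such a per-test override could look like, reusing the same unbuild()/announceTableUpdate pattern shown above; the zero value is illustrative:

// Hypothetical test-local override: make tombstones purgeable immediately for every
// table in KEYSPACE; resetGcGraceSeconds() above undoes this before the next test.
for (TableMetadata table : Schema.instance.getTablesAndViews(KEYSPACE))
    MigrationManager.announceTableUpdate(table.unbuild().gcGraceSeconds(0).build(), true);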
Use of org.apache.cassandra.locator.TokenMetadata in project cassandra by apache.
Class HintTest, method testChangedTopology.
@SuppressWarnings("unchecked")
@Test
public void testChangedTopology() throws Exception {
    // create a hint
    long now = FBUtilities.timestampMicros();
    String key = "testChangedTopology";
    Mutation mutation = createMutation(key, now);
    Hint hint = Hint.create(mutation, now / 1000);

    // prepare metadata with an injected stale endpoint that serves the mutation key
    TokenMetadata tokenMeta = StorageService.instance.getTokenMetadata();
    InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
    InetAddressAndPort endpoint = InetAddressAndPort.getByName("1.1.1.1");
    UUID localId = StorageService.instance.getLocalHostUUID();
    UUID targetId = UUID.randomUUID();
    tokenMeta.updateHostId(targetId, endpoint);
    tokenMeta.updateNormalTokens(ImmutableList.of(mutation.key().getToken()), endpoint);

    // sanity check that there is no data inside yet
    assertNoPartitions(key, TABLE0);
    assertNoPartitions(key, TABLE1);
    assertNoPartitions(key, TABLE2);
    assert StorageProxy.instance.getHintsInProgress() == 0;
    long totalHintCount = StorageProxy.instance.getTotalHints();

    // process the hint message
    HintMessage message = new HintMessage(localId, hint);
    HINT_REQ.handler().doVerb(Message.out(HINT_REQ, message));

    // the hint should not have been applied locally, since we are no longer a replica for the key
    assertNoPartitions(key, TABLE0);
    assertNoPartitions(key, TABLE1);
    assertNoPartitions(key, TABLE2);

    // an attempt to deliver to the new endpoint should have been made; since that node is not live,
    // the mutation should have been stored as a hint again
    assertEquals(totalHintCount + 1, StorageProxy.instance.getTotalHints());
}
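createMutation and assertNoPartitions are HintTest helpers that are not shown here. Below is a minimal sketch of what an assertNoPartitions-style check could look like, assuming Util.cmd(cfs, key) builds a single-partition read the same way Util.cmd(cfs) builds a range read in the CleanupTest snippets above; the real helper may read the partition differently.

// Sketch only: assert that no live rows exist for the given partition key in the given table.
private static void assertNoPartitions(String key, String table) {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(table);
    assertTrue(Util.getAll(Util.cmd(cfs, key).build()).isEmpty());
}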