Use of org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken in project cassandra by apache.
The class CleanupTest, method testCleanupWithIndexes.
/*
@Test
public void testCleanup() throws ExecutionException, InterruptedException
{
    StorageService.instance.getTokenMetadata().clearUnsafe();
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    UnfilteredPartitionIterator iter;

    // insert data and verify we get it back w/ range query
    fillCF(cfs, "val", LOOPS);

    // record max timestamps of the sstables pre-cleanup
    List<Long> expectedMaxTimestamps = getMaxTimestampList(cfs);
    iter = Util.getRangeSlice(cfs);
    assertEquals(LOOPS, Iterators.size(iter));

    // with one token in the ring, owned by the local node, cleanup should be a no-op
    CompactionManager.instance.performCleanup(cfs, 2);

    // ensure the max timestamps of the sstables are retained post-cleanup
    assert expectedMaxTimestamps.equals(getMaxTimestampList(cfs));

    // check data is still there
    iter = Util.getRangeSlice(cfs);
    assertEquals(LOOPS, Iterators.size(iter));
}
*/
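The getMaxTimestampList helper referenced above is not reproduced on this page. A minimal sketch of what such a helper could look like, assuming it simply collects SSTableReader.getMaxTimestamp() from the live sstables (the method body here is an illustration, not necessarily the project's actual helper):

    // Hypothetical sketch: capture the max timestamp of every live sstable so the test
    // can verify that cleanup left them untouched.
    protected List<Long> getMaxTimestampList(ColumnFamilyStore cfs)
    {
        List<Long> list = new ArrayList<>();
        for (SSTableReader sstable : cfs.getLiveSSTables())
            list.add(sstable.getMaxTimestamp());
        return list;
    }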
@Test
public void testCleanupWithIndexes() throws IOException, ExecutionException, InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_INDEXED1);

    // insert data and verify we get it back w/ range query
    fillCF(cfs, "birthdate", LOOPS);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());

    ColumnMetadata cdef = cfs.metadata().getColumn(COLUMN);
    String indexName = "birthdate_key_index";
    long start = System.nanoTime();
    while (!cfs.getBuiltIndexes().contains(indexName) && System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10))
        Thread.sleep(10);

    RowFilter cf = RowFilter.create();
    cf.add(cdef, Operator.EQ, VALUE);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).filterOn("birthdate", Operator.EQ, VALUE).build()).size());

    // We don't allow cleanup when the local host has no range, to avoid wiping out all data when a node has not yet joined the ring.
    // So to make sure cleanup erases everything here, we give localhost the tiniest possible range.
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddress.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddress.getByName("127.0.0.2"));

    CompactionManager.instance.performCleanup(cfs, 2);

    // row data should be gone
    assertEquals(0, Util.getAll(Util.cmd(cfs).build()).size());
    // not only should it be gone, but there should be no data on disk, not even tombstones
    assert cfs.getLiveSSTables().isEmpty();
    // 2ary indexes should return no results, too (although their tombstones won't be gone until compacted)
    assertEquals(0, Util.getAll(Util.cmd(cfs).filterOn("birthdate", Operator.EQ, VALUE).build()).size());
}
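The two single-byte tokens above are what give localhost its "tiniest possible range". Under ByteOrderedPartitioner, a BytesToken sorts by its raw bytes, so with the peer at token 0x01 and the local node at token 0x02, the local node owns only the sliver (0x01, 0x02], which contains none of the test keys; cleanup therefore discards every partition. A standalone illustration of that ordering (an assumed snippet, not part of the test class):

    // BytesToken orders by the raw byte array under ByteOrderedPartitioner.
    BytesToken peer  = new BytesToken(new byte[]{ 1 });   // 127.0.0.2
    BytesToken local = new BytesToken(new byte[]{ 2 });   // 127.0.0.1 (the local node)
    assert peer.compareTo(local) < 0;                     // 0x01 sorts before 0x02
    // The local node's primary range is (0x01, 0x02]. The test partition keys "0", "1", ...
    // start at byte 0x30 in UTF-8, so none of them fall inside it.
    Range<Token> localRange = new Range<>(peer, local);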
Use of org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken in project cassandra by apache.
The class CleanupTest, method testuserDefinedCleanupWithNewToken.
@Test
public void testuserDefinedCleanupWithNewToken() throws ExecutionException, InterruptedException, UnknownHostException
{
    StorageService.instance.getTokenMetadata().clearUnsafe();

    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);

    // insert data and verify we get it back w/ range query
    fillCF(cfs, "val", LOOPS);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());

    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddress.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddress.getByName("127.0.0.2"));

    for (SSTableReader r : cfs.getLiveSSTables())
        CompactionManager.instance.forceUserDefinedCleanup(r.getFilename());

    assertEquals(0, Util.getAll(Util.cmd(cfs).build()).size());
}
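Both tests rely on a fillCF helper that is not shown on this page. A minimal, hypothetical sketch of what such a helper might look like, assuming the RowUpdateBuilder pattern commonly used in Cassandra's unit tests (the exact calls and flush signature are assumptions, not the project's verified code):

    // Hypothetical sketch: write `rows` partitions (keys "0", "1", ...) with a single
    // column value, then flush so cleanup has on-disk sstables to work against.
    protected void fillCF(ColumnFamilyStore cfs, String colName, int rows)
    {
        CompactionManager.instance.disableAutoCompaction();
        for (int i = 0; i < rows; i++)
        {
            new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), String.valueOf(i))
                .clustering(COLUMN)
                .add(colName, VALUE)
                .build()
                .applyUnsafe();
        }
        cfs.forceBlockingFlush();
    }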