use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class SizeTieredCompactionStrategyTest method testPrepBucket.
@Test
public void testPrepBucket() throws Exception
{
    String ksname = KEYSPACE1;
    String cfname = "Standard1";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables, flushing after every insert so each row lands in its own sstable
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++)
    {
        String key = String.valueOf(r);
        new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    Pair<List<SSTableReader>, Double> bucket;
    List<SSTableReader> interestingBucket = mostInterestingBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32);
    assertTrue("nothing should be returned when all buckets are below the min threshold", interestingBucket.isEmpty());
    sstrs.get(0).overrideReadMeter(new RestorableMeter(100.0, 100.0));
    sstrs.get(1).overrideReadMeter(new RestorableMeter(200.0, 200.0));
    sstrs.get(2).overrideReadMeter(new RestorableMeter(300.0, 300.0));
    long estimatedKeys = sstrs.get(0).estimatedKeys();
    // if the bucket holds more sstables than the max threshold, the coldest should be dropped
    bucket = trimToThresholdWithHotness(sstrs, 2);
    assertEquals("the coldest sstable should have been dropped", 2, bucket.left.size());
    double expectedBucketHotness = (200.0 + 300.0) / estimatedKeys;
    assertEquals(String.format("bucket hotness (%f) should be close to %f", bucket.right, expectedBucketHotness),
                 expectedBucketHotness, bucket.right, 1.0);
}
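The final assertion checks simple arithmetic: an sstable's hotness is its read rate divided by its estimated key count, and a bucket's hotness is the sum over its members, so once the coldest reader (rate 100) is trimmed the expected value is (200 + 300) / estimatedKeys. A minimal standalone sketch of that computation (plain Java; SSTableStats and its fields are illustrative names, not Cassandra's API):

import java.util.List;

// Illustrative stand-in for an sstable's read statistics; not a Cassandra class.
record SSTableStats(double readRate, long estimatedKeys)
{
    // hotness = reads per second, normalized by the number of keys in the sstable
    double hotness()
    {
        return estimatedKeys == 0 ? 0.0 : readRate / estimatedKeys;
    }
}

class BucketHotness
{
    static double bucketHotness(List<SSTableStats> bucket)
    {
        return bucket.stream().mapToDouble(SSTableStats::hotness).sum();
    }

    public static void main(String[] args)
    {
        long keys = 1; // in the test above every sstable holds a single row
        List<SSTableStats> trimmed = List.of(new SSTableStats(200.0, keys),
                                             new SSTableStats(300.0, keys));
        System.out.println(bucketHotness(trimmed)); // 500.0, i.e. (200.0 + 300.0) / estimatedKeys
    }
}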
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class KeyCollisionTest method testGetSliceWithCollision.
@Test
public void testGetSliceWithCollision() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.clearUnsafe();
    // token = 2; "kq" sorts after the "key*" rows below lexicographically, but its smaller token orders it first
    insert("k1", "k2", "kq");
    // token = 4
    insert("key1", "key2", "key3");
    // token = 8
    insert("longKey1", "longKey2");
    List<FilteredPartition> partitions = Util.getAll(Util.cmd(cfs).fromKeyIncl("k2").toKeyIncl("key2").build());
    assert partitions.get(0).partitionKey().getKey().equals(ByteBufferUtil.bytes("k2"));
    assert partitions.get(1).partitionKey().getKey().equals(ByteBufferUtil.bytes("kq"));
    assert partitions.get(2).partitionKey().getKey().equals(ByteBufferUtil.bytes("key1"));
    assert partitions.get(3).partitionKey().getKey().equals(ByteBufferUtil.bytes("key2"));
}
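The point of the test is that partitions are ordered by (token, key), not by key alone: the tokens in the comments (2, 4, 8) match the key lengths, consistent with a length-based test partitioner, and keys with equal tokens fall back to byte-wise key order. A standalone sketch of that two-level ordering, assuming token = key length (the comparator is illustrative, not Cassandra's DecoratedKey comparison):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class TokenThenKeyOrder
{
    public static void main(String[] args)
    {
        // primary sort on the "token" (here: key length), ties broken by the key itself
        Comparator<String> byTokenThenKey =
            Comparator.comparingInt(String::length).thenComparing(Comparator.naturalOrder());

        List<String> keys = new ArrayList<>(List.of("key2", "kq", "key1", "k2", "longKey1"));
        keys.sort(byTokenThenKey);
        // [k2, kq, key1, key2, longKey1] -- "kq" precedes "key1" despite sorting after it lexicographically
        System.out.println(keys);
    }
}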
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class SSTableWriterTestBase method truncateCF.
@After
public void truncateCF()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.truncateBlocking();
    // sstable deletion happens asynchronously; block until it completes so the next test starts from a clean data directory
    LifecycleTransaction.waitForDeletions();
}
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class PerSSTableIndexWriterTest method testSparse.
@Test
public void testSparse() throws Exception
{
    final String columnName = "timestamp";
    ColumnFamilyStore cfs = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME);
    ColumnMetadata column = cfs.metadata().getColumn(UTF8Type.instance.decompose(columnName));
    SASIIndex sasi = (SASIIndex) cfs.indexManager.getIndexByName(cfs.name + "_" + columnName);
    File directory = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor descriptor = cfs.newSSTableDescriptor(directory);
    PerSSTableIndexWriter indexWriter = (PerSSTableIndexWriter) sasi.getFlushObserver(descriptor, OperationType.FLUSH);
    final long now = System.currentTimeMillis();
    indexWriter.begin();
    indexWriter.indexes.put(column, indexWriter.newIndex(sasi.getIndex()));
    populateSegment(cfs.metadata(), indexWriter.getIndex(column), new HashMap<Long, Set<Integer>>()
    {
        {
            put(now, new HashSet<>(Arrays.asList(0, 1)));
            put(now + 1, new HashSet<>(Arrays.asList(2, 3)));
            put(now + 2, new HashSet<>(Arrays.asList(4, 5, 6, 7, 8, 9)));
        }
    });
    Callable<OnDiskIndex> segmentBuilder = indexWriter.getIndex(column).scheduleSegmentFlush(false);
    Assert.assertNull(segmentBuilder.call());
    PerSSTableIndexWriter.Index index = indexWriter.getIndex(column);
    Random random = ThreadLocalRandom.current();
    Set<String> segments = new HashSet<>();
    // now test multiple valid segments that together yield an invalid final (combined) segment
    for (int i = 0; i < 3; i++)
    {
        populateSegment(cfs.metadata(), index, new HashMap<Long, Set<Integer>>()
        {
            {
                put(now, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
                put(now + 1, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
                put(now + 2, new HashSet<>(Arrays.asList(random.nextInt(), random.nextInt(), random.nextInt())));
            }
        });
        try
        {
            // flush each of the new segments; they should all succeed
            OnDiskIndex segment = index.scheduleSegmentFlush(false).call();
            index.segments.add(Futures.immediateFuture(segment));
            segments.add(segment.getIndexPath());
        }
        catch (Exception | FSError e)
        {
            e.printStackTrace();
            Assert.fail();
        }
    }
    // make sure that all of the segments are present on the filesystem
    for (String segment : segments)
        Assert.assertTrue(new File(segment).exists());
    indexWriter.complete();
    // make sure that the individual segments have been cleaned up
    for (String segment : segments)
        Assert.assertFalse(new File(segment).exists());
    // and that the combined index doesn't exist either
    Assert.assertFalse(new File(index.outputFile).exists());
}
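The HashMap<Long, Set<Integer>> blocks above use double-brace initialization, which allocates an anonymous HashMap subclass capturing the enclosing instance on every call; that is fine in a throwaway test, but a plain construction is usually preferable. A minimal equivalent for the first segment's data (illustrative helper, not part of the test; substitute new HashSet<>(...) for Set.of(...) if the consumer mutates the sets):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class SegmentData
{
    // same shape the test feeds to populateSegment: timestamp -> row positions to index
    static Map<Long, Set<Integer>> keysByTimestamp(long now)
    {
        Map<Long, Set<Integer>> keys = new HashMap<>();
        keys.put(now, Set.of(0, 1));
        keys.put(now + 1, Set.of(2, 3));
        keys.put(now + 2, Set.of(4, 5, 6, 7, 8, 9));
        return keys;
    }

    public static void main(String[] args)
    {
        System.out.println(keysByTimestamp(System.currentTimeMillis()));
    }
}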
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class LegacySSTableTest method loadLegacyTable.
private static void loadLegacyTable(String tablePattern, String legacyVersion, String compactSuffix) throws IOException
{
    String table = String.format(tablePattern, legacyVersion, compactSuffix);
    logger.info("Loading legacy table {}", table);
    ColumnFamilyStore cfs = Keyspace.open("legacy_tables").getColumnFamilyStore(table);
    // copy the pre-generated legacy sstables into each of the table's data directories ...
    for (File cfDir : cfs.getDirectories().getCFDirectories())
        copySstablesToTestData(legacyVersion, table, cfDir);
    // ... then have the store pick them up without a restart
    cfs.loadNewSSTables();
}
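A hypothetical call site, to show how the two format slots in tablePattern are filled (the pattern and version strings here are illustrative, not taken from the test class):

// slots: the legacy sstable format version, then an optional compact-storage suffix
loadLegacyTable("legacy_%s_simple%s", "mc", "");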