Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
Class SSTableMetadataTest, method testTrackMaxDeletionTime.
@Test
public void testTrackMaxDeletionTime()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
    long timestamp = System.currentTimeMillis();
    // Write 10 partitions of 10 rows each, with TTLs between 10 and 19 seconds.
    for (int i = 0; i < 10; i++)
    {
        DecoratedKey key = Util.dk(Integer.toString(i));
        for (int j = 0; j < 10; j++)
            new RowUpdateBuilder(store.metadata(), timestamp, 10 + j, Integer.toString(i))
                .clustering(Integer.toString(j))
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .applyUnsafe();
    }
    // A single row with ttl=10000 dominates the sstable's maxLocalDeletionTime.
    new RowUpdateBuilder(store.metadata(), timestamp, 10000, "longttl")
        .clustering("col")
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    store.forceBlockingFlush();
    assertEquals(1, store.getLiveSSTables().size());
    int ttltimestamp = (int) (System.currentTimeMillis() / 1000);
    int firstDelTime = 0;
    for (SSTableReader sstable : store.getLiveSSTables())
    {
        firstDelTime = sstable.getSSTableMetadata().maxLocalDeletionTime;
        assertEquals(ttltimestamp + 10000, firstDelTime, 10);
    }
    // A second write with ttl=20000 is flushed into a second sstable with a later deletion time.
    new RowUpdateBuilder(store.metadata(), timestamp, 20000, "longttl2")
        .clustering("col")
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    ttltimestamp = (int) (System.currentTimeMillis() / 1000);
    store.forceBlockingFlush();
    assertEquals(2, store.getLiveSSTables().size());
    List<SSTableReader> sstables = new ArrayList<>(store.getLiveSSTables());
    if (sstables.get(0).getSSTableMetadata().maxLocalDeletionTime < sstables.get(1).getSSTableMetadata().maxLocalDeletionTime)
    {
        assertEquals(firstDelTime, sstables.get(0).getSSTableMetadata().maxLocalDeletionTime);
        assertEquals(ttltimestamp + 20000, sstables.get(1).getSSTableMetadata().maxLocalDeletionTime, 10);
    }
    else
    {
        assertEquals(firstDelTime, sstables.get(1).getSSTableMetadata().maxLocalDeletionTime);
        assertEquals(ttltimestamp + 20000, sstables.get(0).getSSTableMetadata().maxLocalDeletionTime, 10);
    }
    // After compacting everything together, the merged sstable keeps the largest value.
    Util.compact(store, store.getLiveSSTables());
    assertEquals(1, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables())
        assertEquals(ttltimestamp + 20000, sstable.getSSTableMetadata().maxLocalDeletionTime, 10);
}
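The 10-second delta in the assertions above exists because the expected value is recomputed from the wall clock after the writes have been applied. A minimal sketch of the arithmetic being checked, assuming an expiring cell's local deletion time is its write time in seconds plus its TTL (the helper name is illustrative, not part of the test):

// Illustrative only: an expiring cell becomes purgeable ttlSeconds after it was
// written, so the sstable-level maximum tracks the largest (writeTime + ttl).
static int expectedMaxLocalDeletionTime(long writeTimeMillis, int ttlSeconds)
{
    return (int) (writeTimeMillis / 1000) + ttlSeconds;
}

With the values above, expectedMaxLocalDeletionTime(timestamp, 20000) should land within the 10-second tolerance of the merged sstable's maxLocalDeletionTime.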
Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
Class SSTableMetadataTest, method testWithDeletes.
/**
 * 1. create a row whose columns have TTLs: five with ttl=100 and one with ttl=1000
 * 2. flush and verify (maxLocalDeletionTime = time + 1000)
 * 3. delete the column with ttl=1000
 * 4. flush and verify the new sstable (maxLocalDeletionTime = ~now)
 * 5. compact
 * 6. verify the resulting sstable has maxLocalDeletionTime = time + 100
 *
 * @throws ExecutionException
 * @throws InterruptedException
 */
@Test
public void testWithDeletes() throws ExecutionException, InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
    long timestamp = System.currentTimeMillis();
    DecoratedKey key = Util.dk("deletetest");
    // Five short-lived columns (ttl=100)...
    for (int i = 0; i < 5; i++)
        new RowUpdateBuilder(store.metadata(), timestamp, 100, "deletetest")
            .clustering("deletecolumn" + i)
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    // ...plus one long-lived column (ttl=1000) that sets the initial maximum.
    new RowUpdateBuilder(store.metadata(), timestamp, 1000, "deletetest")
        .clustering("todelete")
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    store.forceBlockingFlush();
    assertEquals(1, store.getLiveSSTables().size());
    int ttltimestamp = (int) (System.currentTimeMillis() / 1000);
    int firstMaxDelTime = 0;
    for (SSTableReader sstable : store.getLiveSSTables())
    {
        firstMaxDelTime = sstable.getSSTableMetadata().maxLocalDeletionTime;
        assertEquals(ttltimestamp + 1000, firstMaxDelTime, 10);
    }
    // Delete the long-lived column; the tombstone's local deletion time is ~now.
    RowUpdateBuilder.deleteRow(store.metadata(), timestamp + 1, "deletetest", "todelete").applyUnsafe();
    store.forceBlockingFlush();
    assertEquals(2, store.getLiveSSTables().size());
    boolean foundDelete = false;
    for (SSTableReader sstable : store.getLiveSSTables())
    {
        if (sstable.getSSTableMetadata().maxLocalDeletionTime != firstMaxDelTime)
        {
            assertEquals(ttltimestamp, sstable.getSSTableMetadata().maxLocalDeletionTime, 10);
            foundDelete = true;
        }
    }
    assertTrue(foundDelete);
    // After compaction only the ttl=100 columns contribute to the maximum.
    Util.compact(store, store.getLiveSSTables());
    assertEquals(1, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables())
        assertEquals(ttltimestamp + 100, sstable.getSSTableMetadata().maxLocalDeletionTime, 10);
}
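Why step 6 expects time + 100: the row deletion shadows the ttl=1000 cell, and the tombstone's own local deletion time (~now) is smaller than that of the surviving ttl=100 cells. A sketch of that reasoning with illustrative values (the helper is not test code):

// Illustrative arithmetic for the post-compaction expectation.
static int expectedMaxAfterCompaction(int flushTimeSeconds)
{
    int tombstone = flushTimeSeconds;            // the deletion applied at ~now shadows the ttl=1000 cell
    int survivingCells = flushTimeSeconds + 100; // the five ttl=100 cells remain
    return Math.max(tombstone, survivingCells);  // == flushTimeSeconds + 100
}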
Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
Class SSTableMetadataTest, method testMaxMinComposites.
@Test
public void testMaxMinComposites() throws CharacterCodingException, ExecutionException, InterruptedException
{
    /*
    creates two sstables, with columns like this:
    ---------------------
    k  |a0:9|a1:8|..|a9:0
    ---------------------
    and
    ---------------------
    k2 |b0:9|b1:8|..|b9:0
    ---------------------
    meaning the max clustering values are (b9, 9) and the min are (a0, 0)
    */
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("StandardComposite2");
    for (int i = 0; i < 10; i++)
        new RowUpdateBuilder(cfs.metadata(), 0, "k")
            .clustering("a" + (9 - i), getBytes(i))
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    cfs.forceBlockingFlush();
    for (int i = 0; i < 10; i++)
        new RowUpdateBuilder(cfs.metadata(), 0, "k2")
            .clustering("b" + (9 - i), getBytes(i))
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    assertEquals(1, cfs.getLiveSSTables().size());
    // The merged sstable tracks the min and max value seen per clustering component.
    for (SSTableReader sstable : cfs.getLiveSSTables())
    {
        assertEquals("b9", ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)));
        assertEquals(9, ByteBufferUtil.toInt(sstable.getSSTableMetadata().maxClusteringValues.get(1)));
        assertEquals("a0", ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)));
        assertEquals(0, ByteBufferUtil.toInt(sstable.getSSTableMetadata().minClusteringValues.get(1)));
    }
}
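The per-component min/max clustering values exist so reads can skip sstables whose clustering range cannot intersect a query. A minimal, hypothetical sketch of that check (mayContain is illustrative, not the Cassandra API):

// Hypothetical helper: an sstable may be skipped for a clustering lookup when
// the sought first-component value falls outside the tracked [min, max] bounds.
static boolean mayContain(String sought, String minClustering, String maxClustering)
{
    return sought.compareTo(minClustering) >= 0 && sought.compareTo(maxClustering) <= 0;
}

For the sstable above, mayContain("b5", "a0", "b9") is true, while mayContain("c1", "a0", "b9") is false, so a lookup for c1 could skip the file entirely.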
Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
Class SSTableRewriterTest, method testNumberOfFiles_dont_clean_readers.
@Test
public void testNumberOfFiles_dont_clean_readers() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION,
                                                        Collections.singletonList(scanner),
                                                        controller,
                                                        FBUtilities.nowInSeconds(),
                                                        UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            // Roll over to a fresh writer every ~25 MB of output.
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out
                assertEquals(files, cfs.getLiveSSTables().size());
            }
        }
        sstables = rewriter.finish();
    }
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
Class SSTableRewriterTest, helper method testNumberOfFiles_abort.
private void testNumberOfFiles_abort(RewriterTest test) throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    DecoratedKey origFirst = s.first;
    DecoratedKey origLast = s.last;
    long startSize = cfs.metric.liveDiskSpaceUsed.getCount();
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false))
    {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        // The supplied callback is expected to abort the rewrite.
        test.run(scanner, controller, s, cfs, rewriter, txn);
    }
    LifecycleTransaction.waitForDeletions();
    // After the abort, disk usage and the set of live sstables must be unchanged.
    assertEquals(startSize, cfs.metric.liveDiskSpaceUsed.getCount());
    assertEquals(1, cfs.getLiveSSTables().size());
    assertFileCounts(s.descriptor.directory.list());
    assertEquals(origFirst, cfs.getLiveSSTables().iterator().next().first);
    assertEquals(origLast, cfs.getLiveSSTables().iterator().next().last);
    validateCFS(cfs);
}
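A concrete test supplies the abort behavior through the RewriterTest callback. A hedged sketch of such a caller, assuming RewriterTest is a single-method interface matching the test.run(...) call above (the body is illustrative, not the project's actual test):

@Test
public void testNumberOfFiles_abort() throws Exception
{
    testNumberOfFiles_abort((scanner, controller, sstable, cfs, rewriter, txn) -> {
        // Illustrative: append everything, then abandon the rewrite so the
        // helper's post-conditions (original sstable left intact) hold.
        try (CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION,
                                                            Collections.singletonList(scanner),
                                                            controller,
                                                            FBUtilities.nowInSeconds(),
                                                            UUIDGen.getTimeUUID()))
        {
            while (ci.hasNext())
                rewriter.append(ci.next());
        }
        rewriter.abort();
    });
}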