Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
From the class SSTableRewriterTest, the method testSSTableSplit: splitting one large sstable into several smaller ones inside an offline transaction.
@Test
public void testSSTableSplit() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.UNKNOWN, s)) {
        // split the single large sstable into chunks of at most 10 MB
        SSTableSplitter splitter = new SSTableSplitter(cfs, txn, 10);
        splitter.split();
        assertFileCounts(s.descriptor.directory.list());
        LifecycleTransaction.waitForDeletions();
        for (File f : s.descriptor.directory.listFiles()) {
            // we need to clear out the data dir, otherwise tests running after this break
            FileUtils.deleteRecursive(f);
        }
    }
    truncate(cfs);
}
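The test leans on Cassandra's Transactional contract: a LifecycleTransaction that is closed without being committed rolls itself back, and here SSTableSplitter.split() drives the transaction to completion on success, so the test never calls finish() itself. A minimal sketch of that contract, assuming an SSTableReader named reader is already in hand (the variable names are illustrative, not from the test):

// sketch only: the offline transaction takes ownership of the reader, and
// closing it without finish() aborts the transaction and undoes staged changes
try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.UNKNOWN, reader)) {
    // stage work against txn here, e.g. txn.update(newReader, false)
    txn.finish(); // commit; if an exception skips this line, close() rolls back
}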
From the class SSTableRewriterTest, the method testNumberOfFiles_finish_empty_new_writer: finishing a rewrite while the most recently switched-in writer is still empty.
@Test
public void testNumberOfFiles_finish_empty_new_writer() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getFilePointer() > 2500000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out
                assertEquals(files, cfs.getLiveSSTables().size());
            }
            if (files == 3) {
                // finish while the most recently switched-in writer has had nothing appended to it
                rewriter.finish();
                break;
            }
        }
    }
    LifecycleTransaction.waitForDeletions();
    // the empty writer must not surface as a live sstable
    assertEquals(files - 1, cfs.getLiveSSTables().size());
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
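One caveat the test can gloss over: Tracker.tryModify returns null when any of the requested sstables already belong to another transaction, which in a try-with-resources chain like the one above would fail further down the pipeline. Production-style callers null-check before building on the transaction; a hedged sketch of that guard (the error handling is illustrative, not from the test):

LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
if (txn == null) {
    // another operation already owns one of these sstables; back off or retry
    throw new IllegalStateException("sstables are already being modified");
}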
From the class SSTableRewriterTest, the method basicTest2: a complete compact-and-rewrite pass over a single live sstable.
@Test
public void basicTest2() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
    validateCFS(cfs);
}
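Two details in this pipeline are worth noting. First, try-with-resources closes in reverse declaration order, so the CompactionIterator, controller, and rewriter shut down before the transaction they depend on. Second, the explicit writer.finish() is what commits: had the loop thrown beforehand, closing the unfinished rewriter and transaction would have aborted them, deleting the partially written files and leaving the original sstable live. That rollback path is exactly what the two abort tests below exercise.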
From the class SSTableWriterTest, the method testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables: aborting a transaction removes both a finished sstable and one whose writer is still open.
@Test
public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    SSTableWriter writer1 = getWriter(cfs, dir, txn);
    SSTableWriter writer2 = getWriter(cfs, dir, txn);
    try {
        for (int i = 0; i < 10000; i++) {
            UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
            for (int j = 0; j < 100; j++)
                builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
            writer1.append(builder.build().unfilteredIterator());
        }
        assertFileCounts(dir.list());
        for (int i = 10000; i < 20000; i++) {
            UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
            for (int j = 0; j < 100; j++)
                builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
            writer2.append(builder.build().unfilteredIterator());
        }
        // finish writer1 and stage the resulting sstable in the transaction
        SSTableReader sstable = writer1.finish(true);
        txn.update(sstable, false);
        int datafiles = assertFileCounts(dir.list());
        assertEquals(2, datafiles);
        // these counts don't hold on Windows: the writer keeps its channel open until .abort() is called (via the builder)
        if (!FBUtilities.isWindows) {
            LifecycleTransaction.waitForDeletions();
            assertFileCounts(dir.list());
        }
        txn.abort();
        LifecycleTransaction.waitForDeletions();
        datafiles = assertFileCounts(dir.list());
        assertEquals(0, datafiles);
        validateCFS(cfs);
    } finally {
        writer1.close();
        writer2.close();
    }
}
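The asymmetry between the two writers is the point of the test: writer1 was finished and its sstable staged in the transaction via txn.update(sstable, false) (false marking it as a new, non-original reader), while writer2 was never finished. txn.abort() obsoletes the staged sstable, and closing the still-unfinished writer2 in the finally block aborts it under the same Transactional contract, so both data files are gone by the final count. Closing writer1 after its successful finish is a no-op.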
From the class SSTableWriterTest, the method testAbortTxnWithOpenEarlyShouldRemoveSSTable: aborting removes an sstable that was opened early while still being written.
@Test
public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
    try (SSTableWriter writer = getWriter(cfs, dir, txn)) {
        for (int i = 0; i < 10000; i++) {
            UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
            for (int j = 0; j < 100; j++)
                builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
            writer.append(builder.build().unfilteredIterator());
        }
        // open a reader over the data flushed so far
        SSTableReader s = writer.setMaxDataAge(1000).openEarly();
        assert s != null;
        assertFileCounts(dir.list());
        for (int i = 10000; i < 20000; i++) {
            UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
            for (int j = 0; j < 100; j++)
                builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
            writer.append(builder.build().unfilteredIterator());
        }
        // a second early-open reader must cover more of the key range than the first
        SSTableReader s2 = writer.setMaxDataAge(1000).openEarly();
        assertTrue(s.last.compareTo(s2.last) < 0);
        assertFileCounts(dir.list());
        // release the early readers so abort can reclaim the files
        s.selfRef().release();
        s2.selfRef().release();
        int datafiles = assertFileCounts(dir.list());
        assertEquals(1, datafiles);
        // these counts don't hold on Windows: the writer keeps its channel open until .abort() is called (via the builder)
        if (!FBUtilities.isWindows) {
            LifecycleTransaction.waitForDeletions();
            assertFileCounts(dir.list());
        }
        writer.abort();
        txn.abort();
        LifecycleTransaction.waitForDeletions();
        datafiles = assertFileCounts(dir.list());
        assertEquals(0, datafiles);
        validateCFS(cfs);
    }
}
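openEarly() exposes a reader over only the data the writer has flushed so far, which is why the second early reader must end at a later last key than the first (s.last.compareTo(s2.last) < 0). Each early reader pins the underlying files through its self-reference, so the selfRef().release() calls are needed before writer.abort() and txn.abort() can actually delete the files; the final counts then confirm nothing is left on disk.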