Use of org.apache.cassandra.db.compaction.CompactionIterator in project cassandra by apache.
The class SSTableRewriterTest, method testAbortHelper.
private void testAbortHelper(boolean earlyException, boolean offline) throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    if (!offline)
        cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = offline ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting)
                                            : cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 100, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner),
                                                        controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        }
        try
        {
            rewriter.throwDuringPrepare(earlyException);
            rewriter.prepareToCommit();
        }
        catch (Throwable t)
        {
            rewriter.abort();
        }
    }
    finally
    {
        if (offline)
            s.selfRef().release();
    }
    LifecycleTransaction.waitForDeletions();
    int filecount = assertFileCounts(s.descriptor.directory.list());
    assertEquals(1, filecount);
    if (!offline)
    {
        assertEquals(1, cfs.getLiveSSTables().size());
        validateCFS(cfs);
        truncate(cfs);
    }
    else
    {
        assertEquals(0, cfs.getLiveSSTables().size());
        cfs.truncateBlocking();
    }
    filecount = assertFileCounts(s.descriptor.directory.list());
    if (offline)
    {
        // the file was never added to the CFS, so it was not truncated away above
        assertEquals(1, filecount);
        for (File f : s.descriptor.directory.listFiles())
            FileUtils.deleteRecursive(f);
        filecount = assertFileCounts(s.descriptor.directory.list());
    }
    assertEquals(0, filecount);
    truncate(cfs);
}
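The helper is parameterized over both failure timing (throw early or late during prepareToCommit) and transaction mode (offline or tracked). The @Test driver methods are not part of this excerpt; a plausible sketch of one, covering all four combinations, looks like this:

@Test
public void testAbort() throws Exception
{
    // Hypothetical driver, inferred only from the helper's two boolean parameters.
    testAbortHelper(false, false);  // late exception, tracked transaction
    testAbortHelper(true, false);   // early exception, tracked transaction
    testAbortHelper(false, true);   // late exception, offline transaction
    testAbortHelper(true, true);    // early exception, offline transaction
}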
Use of org.apache.cassandra.db.compaction.CompactionIterator in project cassandra by apache.
The class SSTableRewriterTest, method basicTest.
@Test
public void basicTest() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int j = 0; j < 100; j++)
    {
        new RowUpdateBuilder(cfs.metadata(), j, String.valueOf(j)).clustering("0").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().apply();
    }
    cfs.forceBlockingFlush();
    Set<SSTableReader> sstables = new HashSet<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    assertEquals(sstables.iterator().next().bytesOnDisk(), cfs.metric.liveDiskSpaceUsed.getCount());
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners,
                                                        controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            writer.append(ci.next());
        }
        writer.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list()));
    validateCFS(cfs);
    truncate(cfs);
}
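The pipeline basicTest exercises recurs throughout these snippets. A sketch of it factored into a reusable helper, built only from the calls shown above (getWriter is the test helper these snippets already use, and the same imports are assumed):

// Illustrative sketch of the rewrite pipeline: merge the input SSTables through a
// CompactionIterator and stream the merged partitions into an SSTableRewriter.
private static List<SSTableReader> rewriteAll(ColumnFamilyStore cfs, Set<SSTableReader> sstables)
{
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(sstables);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
         CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners,
                                                        controller, nowInSec, UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
            writer.append(ci.next());  // the iterator yields merged, purged partitions from all input scanners
        return writer.finish();        // finish the rewrite and obtain the new readers
    }
}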
Use of org.apache.cassandra.db.compaction.CompactionIterator in project cassandra by apache.
The class SSTableRewriterTest, method testAllKeysReadable.
@Test
public void testAllKeysReadable() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int i = 0; i < 100; i++)
    {
        String key = Integer.toString(i);
        for (int j = 0; j < 10; j++)
            new RowUpdateBuilder(cfs.metadata(), 100, key).clustering(Integer.toString(j)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().apply();
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    validateKeys(keyspace);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader s = cfs.getLiveSSTables().iterator().next();
    Set<SSTableReader> compacting = new HashSet<>();
    compacting.add(s);
    int keyCount = 0;
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner),
                                                        controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            if (keyCount % 10 == 0)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
            keyCount++;
            validateKeys(keyspace);
        }
        rewriter.finish();
    }
    validateKeys(keyspace);
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    truncate(cfs);
}
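validateKeys is not shown in this excerpt. A plausible implementation reads each of the 100 partitions written above back and asserts it is still readable mid-rewrite; the Util helper calls (Util.dk, Util.cmd, Util.getOnlyPartitionUnfiltered) are an assumption about the surrounding test utilities, not confirmed by this excerpt:

// Hypothetical sketch of the validateKeys helper referenced above.
private void validateKeys(Keyspace ks)
{
    for (int i = 0; i < 100; i++)
    {
        DecoratedKey key = Util.dk(Integer.toString(i));
        ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(ks.getColumnFamilyStore(CF), key).build());
        // every key must remain readable even while writers are being switched
        assertTrue(partition != null && partition.rowCount() > 0);
    }
}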
Use of org.apache.cassandra.db.compaction.CompactionIterator in project cassandra by apache.
The class SSTableRewriterTest, method testCanonicalView.
@Test
public void testCanonicalView() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = Sets.newHashSet(s);
    assertEquals(1, sstables.size());
    boolean checked = false;
    try (ISSTableScanner scanner = sstables.iterator().next().getScanner();
         CompactionController controller = new CompactionController(cfs, sstables, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner),
                                                        controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext())
        {
            writer.append(ci.next());
            if (!checked && writer.currentWriter().getFilePointer() > 15000000)
            {
                checked = true;
                ColumnFamilyStore.ViewFragment viewFragment = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
                // the canonical view should contain only the original SSTable, never an early-opened one
                assertEquals(1, viewFragment.sstables.size());
                SSTableReader sstable = viewFragment.sstables.get(0);
                assertEquals(s.descriptor, sstable.descriptor);
                assertTrue("Found early opened SSTable in canonical view: " + sstable.getFilename(), sstable.openReason != SSTableReader.OpenReason.EARLY);
            }
        }
    }
    truncateCF();
    validateCFS(cfs);
}
Use of org.apache.cassandra.db.compaction.CompactionIterator in project cassandra by apache.
The class SSTableRewriterTest, method testNumberOfFiles_truncate.
@Test
public void testNumberOfFiles_truncate() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner),
                                                        controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
            {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out
                assertEquals(cfs.getLiveSSTables().size(), files);
            }
        }
        sstables = rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
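The 25000000-byte switch threshold recurs in several of these tests: rolling to a fresh writer bounds the size of each output SSTable. The policy can be factored into a small helper; this is an illustrative sketch only, where maybeSwitchWriter is a hypothetical name and getWriter is the test helper used above:

// Illustrative sketch: roll over to a fresh writer once the current one has
// written more than maxBytes to disk, so the rewrite produces bounded-size files.
private static void maybeSwitchWriter(SSTableRewriter rewriter, ColumnFamilyStore cfs,
                                      File directory, LifecycleTransaction txn, long maxBytes)
{
    if (rewriter.currentWriter().getOnDiskFilePointer() > maxBytes)
        rewriter.switchWriter(getWriter(cfs, directory, txn));
}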