Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.
From the class RealTransactionsTest, method replaceSSTable.
private SSTableReader replaceSSTable(ColumnFamilyStore cfs, LifecycleTransaction txn, boolean fail) {
    List<SSTableReader> newsstables = null;
    int nowInSec = FBUtilities.nowInSeconds();
    try (CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(FBUtilities.nowInSeconds()))) {
        try (SSTableRewriter rewriter = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
             AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
             CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId())) {
            long lastCheckObsoletion = nanoTime();
            File directory = txn.originals().iterator().next().descriptor.directory;
            Descriptor desc = cfs.newSSTableDescriptor(directory);
            TableMetadataRef metadata = Schema.instance.getTableMetadataRef(desc);
            // a writer must be installed before the first append
            rewriter.switchWriter(SSTableWriter.create(metadata, desc, 0, 0, null, false, 0, SerializationHeader.make(cfs.metadata(), txn.originals()), cfs.indexManager.listIndexes(), txn));
            while (ci.hasNext()) {
                rewriter.append(ci.next());
                // refresh the controller's view of overlapping sstables at most once per minute
                if (nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                    controller.maybeRefreshOverlaps();
                    lastCheckObsoletion = nanoTime();
                }
            }
            if (!fail)
                newsstables = rewriter.finish();
            else
                rewriter.abort();
        }
    }
    assertTrue(fail || newsstables != null);
    if (newsstables != null) {
        Assert.assertEquals(1, newsstables.size());
        return newsstables.iterator().next();
    }
    return null;
}
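The same pattern can be pared down to its structural skeleton: open the CompactionController, the SSTableRewriter, the scanners and the CompactionIterator together, install a writer, append every partition, and periodically refresh the overlap information. The sketch below is illustrative, not part of Cassandra; it assumes an initialized test environment and the same imports as the test above, and the helper name rewriteInPlace is made up for this example. It uses only calls that appear in the snippet above.

// Illustrative helper (not a Cassandra API): the controller/iterator/rewriter skeleton
// from replaceSSTable above, always finishing the rewrite instead of optionally aborting.
private static List<SSTableReader> rewriteInPlace(ColumnFamilyStore cfs, LifecycleTransaction txn)
{
    int nowInSec = FBUtilities.nowInSeconds();
    // the controller decides which tombstones and overlaps matter for this compaction
    try (CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(nowInSec));
         SSTableRewriter rewriter = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
         AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
         CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId()))
    {
        // a writer must be installed before the first append
        File directory = txn.originals().iterator().next().descriptor.directory;
        Descriptor desc = cfs.newSSTableDescriptor(directory);
        rewriter.switchWriter(SSTableWriter.create(Schema.instance.getTableMetadataRef(desc), desc, 0, 0, null, false, 0,
                                                   SerializationHeader.make(cfs.metadata(), txn.originals()),
                                                   cfs.indexManager.listIndexes(), txn));
        long lastRefresh = nanoTime();
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            // keep the controller's overlap information fresh on long rewrites
            if (nanoTime() - lastRefresh > TimeUnit.MINUTES.toNanos(1L))
            {
                controller.maybeRefreshOverlaps();
                lastRefresh = nanoTime();
            }
        }
        return rewriter.finish();
    }
}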
Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.
From the class PendingAntiCompactionTest, method testBlockedAcquisition.
@Test
public void testBlockedAcquisition() throws ExecutionException, InterruptedException, TimeoutException {
    cfs.disableAutoCompaction();
    ExecutorService es = Executors.newFixedThreadPool(1);
    makeSSTables(2);
    UUID prsid = UUID.randomUUID();
    Set<SSTableReader> sstables = cfs.getLiveSSTables();
    List<ISSTableScanner> scanners = sstables.stream().map(SSTableReader::getScanner).collect(Collectors.toList());
    try {
        try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
             CompactionController controller = new CompactionController(cfs, sstables, 0);
             CompactionIterator ci = CompactionManager.getAntiCompactionIterator(scanners, controller, 0, UUID.randomUUID(), CompactionManager.instance.active, () -> false)) {
            // `ci` is our imaginary ongoing anticompaction which makes no progress until after 30s
            // now we try to start a new AC, which will try to cancel all ongoing compactions
            CompactionManager.instance.active.beginCompaction(ci);
            PendingAntiCompaction pac = new PendingAntiCompaction(prsid, Collections.singleton(cfs), atEndpoint(FULL_RANGE, NO_RANGES), 0, 0, es, () -> false);
            ListenableFuture fut = pac.run();
            try {
                fut.get(30, TimeUnit.SECONDS);
                fail("the future should throw an exception since we try to start a new anticompaction while one is already running");
            } catch (ExecutionException e) {
                assertTrue(e.getCause() instanceof PendingAntiCompaction.SSTableAcquisitionException);
            }
            assertEquals(1, getCompactionsFor(cfs).size());
            for (CompactionInfo.Holder holder : getCompactionsFor(cfs))
                assertFalse(holder.isStopRequested());
        }
    } finally {
        es.shutdown();
        ISSTableScanner.closeAllAndPropagate(scanners, null);
    }
}
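The test deliberately leaves `ci` registered with the active-compactions tracker for the whole try block, so that PendingAntiCompaction sees an ongoing anticompaction and fails to acquire the sstables. Outside such a simulation a registration would normally be paired with a deregistration. The sketch below assumes the tracker exposes a finishCompaction counterpart to beginCompaction; treat that call as an assumption, since it is not shown in the snippet above.

// Hedged sketch: pair beginCompaction with finishCompaction in a finally block.
// The test above intentionally omits the pairing to keep the anticompaction "running".
CompactionManager.instance.active.beginCompaction(ci);
try
{
    // ... consume ci here, e.g. by appending each partition into an SSTableRewriter,
    //     while it remains visible to anything that inspects ongoing compactions ...
}
finally
{
    CompactionManager.instance.active.finishCompaction(ci);  // assumed counterpart to beginCompaction
}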
Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.
From the class SSTableRewriterTest, method testNumberOfFiles_dont_clean_readers.
@Test
public void testNumberOfFiles_dont_clean_readers() {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false, true);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            // roll over to a new output sstable once the current one grows past ~25 MB on disk
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out.
                assertEquals(cfs.getLiveSSTables().size(), files);
            }
        }
        sstables = rewriter.finish();
    }
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.tryListNames());
    validateCFS(cfs);
}
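Note that this test constructs its CompactionController with gcBefore = 0, while RealTransactionsTest above passes cfs.gcBefore(nowInSeconds). The sketch below contrasts the two; it is a hedged illustration assuming `cfs` and `compacting` are set up as in the test, and the interpretation (0 effectively disables time-based tombstone purging, a real gcBefore allows expired tombstones to be dropped) is the usual reading of that parameter rather than something stated in the snippet.

// Hedged sketch contrasting the two gcBefore arguments seen on this page.
int nowInSec = FBUtilities.nowInSeconds();
try (CompactionController keepAllTombstones = new CompactionController(cfs, compacting, 0);
     CompactionController purgeExpired = new CompactionController(cfs, compacting, cfs.gcBefore(nowInSec)))
{
    // either controller can back a CompactionIterator, exactly as in the examples above;
    // the first keeps every tombstone, the second lets expired ones be purged during the rewrite
}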
Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.
From the class SSTableRewriterTest, method testNumberOfFiles_abort.
private void testNumberOfFiles_abort(RewriterTest test) {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    DecoratedKey origFirst = s.first;
    DecoratedKey origLast = s.last;
    long startSize = cfs.metric.liveDiskSpaceUsed.getCount();
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false, true)) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        test.run(scanner, controller, s, cfs, rewriter, txn);
    }
    // after the aborted rewrite, disk usage, file counts and the key range must match the original sstable
    LifecycleTransaction.waitForDeletions();
    assertEquals(startSize, cfs.metric.liveDiskSpaceUsed.getCount());
    assertEquals(1, cfs.getLiveSSTables().size());
    assertFileCounts(s.descriptor.directory.tryListNames());
    assertEquals(cfs.getLiveSSTables().iterator().next().first, origFirst);
    assertEquals(cfs.getLiveSSTables().iterator().next().last, origLast);
    validateCFS(cfs);
}
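This helper delegates the actual rewrite-and-abort work to the RewriterTest callback and then verifies that nothing leaked. The following is a hypothetical caller, not taken from the test class: it assumes RewriterTest is a single-method interface whose parameters follow the call test.run(scanner, controller, s, cfs, rewriter, txn) above.

// Hypothetical caller of testNumberOfFiles_abort; the lambda appends every partition
// and then aborts, exercising the cleanup assertions in the helper above.
testNumberOfFiles_abort((scanner, controller, sstable, store, rewriter, txn) -> {
    try (CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION,
                                                        Collections.singletonList(scanner),
                                                        controller,
                                                        FBUtilities.nowInSeconds(),
                                                        UUIDGen.getTimeUUID()))
    {
        while (ci.hasNext())
            rewriter.append(ci.next());
        // abandon the rewrite; the helper then verifies disk usage, file counts and
        // the original key range are all restored
        rewriter.abort();
    }
});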
Use of org.apache.cassandra.db.compaction.CompactionController in project cassandra by apache.
From the class SSTableRewriterTest, method testAbortHelper.
private void testAbortHelper(boolean earlyException, boolean offline) {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    if (!offline)
        cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = offline ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting) : cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 100, 10000000, false, true);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
        }
        try {
            // optionally inject a failure during prepare, then abort on any error
            rewriter.throwDuringPrepare(earlyException);
            rewriter.prepareToCommit();
        } catch (Throwable t) {
            rewriter.abort();
        }
    } finally {
        if (offline)
            s.selfRef().release();
    }
    LifecycleTransaction.waitForDeletions();
    int filecount = assertFileCounts(s.descriptor.directory.tryListNames());
    assertEquals(filecount, 1);
    if (!offline) {
        assertEquals(1, cfs.getLiveSSTables().size());
        validateCFS(cfs);
        truncate(cfs);
    } else {
        assertEquals(0, cfs.getLiveSSTables().size());
        cfs.truncateBlocking();
    }
    filecount = assertFileCounts(s.descriptor.directory.tryListNames());
    if (offline) {
        // the file is not added to the CFS, therefore not truncated away above
        assertEquals(1, filecount);
        for (File f : s.descriptor.directory.tryList()) {
            FileUtils.deleteRecursive(f);
        }
        filecount = assertFileCounts(s.descriptor.directory.tryListNames());
    }
    assertEquals(0, filecount);
    truncate(cfs);
}
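The offline/online split is the interesting part of this helper: in the offline case the transaction is built directly from the readers via LifecycleTransaction.offline, the sstable is never added to the ColumnFamilyStore, and the test therefore holds the reader reference and must release it itself. A minimal sketch of just that split, assuming `cfs`, `compacting` and `s` are set up as in the test above:

// Minimal sketch of choosing between an offline and a tracker-managed transaction.
boolean offline = true;
try (LifecycleTransaction txn = offline
                                ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting)
                                : cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN))
{
    // ... build a CompactionController / CompactionIterator / SSTableRewriter against txn,
    //     exactly as in testAbortHelper above ...
}
finally
{
    if (offline)
        s.selfRef().release();  // the reader was never handed to the CFS, so release it here
}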