Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class LeveledCompactionStrategy, method getMaximalTask.
// transaction is closed by AbstractCompactionTask::execute
@SuppressWarnings("resource")
public synchronized Collection<AbstractCompactionTask> getMaximalTask(int gcBefore, boolean splitOutput) {
    Iterable<SSTableReader> sstables = manifest.getAllSSTables();
    Iterable<SSTableReader> filteredSSTables = filterSuspectSSTables(sstables);
    if (Iterables.isEmpty(sstables))
        return null;

    LifecycleTransaction txn = cfs.getTracker().tryModify(filteredSSTables, OperationType.COMPACTION);
    if (txn == null)
        return null;

    return Arrays.<AbstractCompactionTask>asList(new LeveledCompactionTask(cfs, txn, 0, gcBefore, getMaxSSTableBytes(), true));
}
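The pattern above hinges on Tracker.tryModify: it returns a transaction only when none of the requested sstables already belong to another operation, and null otherwise rather than blocking. The sketch below is illustrative only (the helper class and its name are not part of the project); it shows that contract in isolation, relying on the fact that closing a transaction that was never committed aborts it and releases the sstables, and that try-with-resources skips close() for a null resource.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;

import java.util.Set;

final class TryModifyContractSketch {
    // Hypothetical helper: probes whether the given sstables could be marked for compaction right now.
    static boolean canMarkCompacting(ColumnFamilyStore cfs, Set<SSTableReader> candidates) {
        try (LifecycleTransaction txn = cfs.getTracker().tryModify(candidates, OperationType.COMPACTION)) {
            // null means some candidate is already claimed by another transaction;
            // for a non-null, never-committed transaction, close() aborts it and releases the sstables
            return txn != null;
        }
    }
}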
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class CompactionManager, method parallelAllSSTableOperation.
/**
* Run an operation over all sstables using {@code jobs} threads
*
* @param cfs the column family store to run the operation on
* @param operation the operation to run
* @param jobs the number of threads to use - 0 means use all available. It never uses more than concurrent_compactors threads
* @return status of the operation
* @throws ExecutionException
* @throws InterruptedException
*/
@SuppressWarnings("resource")
private AllSSTableOpStatus parallelAllSSTableOperation(final ColumnFamilyStore cfs, final OneSSTableOperation operation, int jobs, OperationType operationType) throws ExecutionException, InterruptedException {
    List<LifecycleTransaction> transactions = new ArrayList<>();
    try (LifecycleTransaction compacting = cfs.markAllCompacting(operationType)) {
        Iterable<SSTableReader> sstables = compacting != null ? Lists.newArrayList(operation.filterSSTables(compacting)) : Collections.<SSTableReader>emptyList();
        if (Iterables.isEmpty(sstables)) {
            logger.info("No sstables to {} for {}.{}", operationType.name(), cfs.keyspace.getName(), cfs.name);
            return AllSSTableOpStatus.SUCCESSFUL;
        }
        List<Future<?>> futures = new ArrayList<>();
        for (final SSTableReader sstable : sstables) {
            final LifecycleTransaction txn = compacting.split(singleton(sstable));
            transactions.add(txn);
            Callable<Object> callable = new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    operation.execute(txn);
                    return this;
                }
            };
            Future<?> fut = executor.submitIfRunning(callable, "parallel sstable operation");
            if (!fut.isCancelled())
                futures.add(fut);
            else
                return AllSSTableOpStatus.ABORTED;
            if (jobs > 0 && futures.size() == jobs) {
                FBUtilities.waitOnFutures(futures);
                futures.clear();
            }
        }
        FBUtilities.waitOnFutures(futures);
        assert compacting.originals().isEmpty();
        return AllSSTableOpStatus.SUCCESSFUL;
    } finally {
        Throwable fail = Throwables.close(null, transactions);
        if (fail != null)
            logger.error("Failed to clean up lifecycle transactions {}", fail);
    }
}
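The interesting move here is compacting.split(singleton(sstable)): one parent transaction covering everything that is compacting is carved into single-sstable child transactions, so each job can commit or abort on its own while the finally block guarantees every child is eventually closed. A condensed sketch of that shape follows; it is not the project's code, the per-sstable work is left to the caller (and is assumed to commit or abort each child itself), and cfs.getTracker().tryModify(cfs.getLiveSSTables(), ...) stands in for markAllCompacting.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.utils.Throwables;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;

final class SplitTransactionSketch {
    static void forEachSSTable(ColumnFamilyStore cfs, Consumer<LifecycleTransaction> work) {
        List<LifecycleTransaction> children = new ArrayList<>();
        try (LifecycleTransaction parent = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION)) {
            if (parent == null)
                return; // another operation already owns some of the live sstables
            for (SSTableReader sstable : new ArrayList<>(parent.originals())) {
                LifecycleTransaction child = parent.split(Collections.singleton(sstable));
                children.add(child);
                work.accept(child); // the work is expected to commit or abort the child itself
            }
        } finally {
            // as in the finally block above: closing an unfinished child aborts it;
            // the real method logs the accumulated failure instead of rethrowing it
            Throwables.maybeFail(Throwables.close(null, children));
        }
    }
}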
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class PendingRepairManager, method getRepairFinishedCompactionTask.
@SuppressWarnings("resource")
private RepairFinishedCompactionTask getRepairFinishedCompactionTask(UUID sessionID) {
    Set<SSTableReader> sstables = get(sessionID).getSSTables();
    long repairedAt = ActiveRepairService.instance.consistent.local.getFinalSessionRepairedAt(sessionID);
    LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.COMPACTION);
    return txn == null ? null : new RepairFinishedCompactionTask(cfs, txn, sessionID, repairedAt);
}
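The @SuppressWarnings("resource") here is again about ownership: once tryModify succeeds, the new RepairFinishedCompactionTask becomes the sole owner of the transaction and is responsible for closing it. A hedged, generic sketch of that handoff rule follows; the helper and its Function parameter are illustrative, not Cassandra APIs.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;

import java.util.Set;
import java.util.function.Function;

final class TxnHandoffSketch {
    // Hypothetical helper: builds an owner object around a compaction transaction,
    // or returns null when the sstables are already claimed elsewhere.
    static <T> T buildWithTxn(ColumnFamilyStore cfs, Set<SSTableReader> sstables,
                              Function<LifecycleTransaction, T> factory) {
        LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.COMPACTION);
        if (txn == null)
            return null; // mirrors the "return txn == null ? null : ..." above
        try {
            return factory.apply(txn); // e.g. t -> new RepairFinishedCompactionTask(cfs, t, sessionID, repairedAt)
        } catch (RuntimeException | Error e) {
            txn.abort(); // the owner was never built, so nothing else will release these sstables
            throw e;
        }
    }
}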
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class SizeTieredCompactionStrategy, method getMaximalTask.
@SuppressWarnings("resource")
public Collection<AbstractCompactionTask> getMaximalTask(final int gcBefore, boolean splitOutput) {
    Iterable<SSTableReader> filteredSSTables = filterSuspectSSTables(sstables);
    if (Iterables.isEmpty(filteredSSTables))
        return null;

    LifecycleTransaction txn = cfs.getTracker().tryModify(filteredSSTables, OperationType.COMPACTION);
    if (txn == null)
        return null;

    if (splitOutput)
        return Arrays.<AbstractCompactionTask>asList(new SplittingCompactionTask(cfs, txn, gcBefore));
    return Arrays.<AbstractCompactionTask>asList(new CompactionTask(cfs, txn, gcBefore));
}
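Both getMaximalTask implementations start from filterSuspectSSTables, an AbstractCompactionStrategy helper that drops sstables that have been marked suspect (isMarkedSuspect(), typically set after a read error) so they are never handed to tryModify. The stream-based reimplementation below is purely illustrative, not the project's actual helper.

import org.apache.cassandra.io.sstable.format.SSTableReader;

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

final class SuspectFilterSketch {
    // Keeps only sstables that have not been marked suspect.
    static List<SSTableReader> withoutSuspects(Iterable<SSTableReader> candidates) {
        return StreamSupport.stream(candidates.spliterator(), false)
                            .filter(sstable -> !sstable.isMarkedSuspect())
                            .collect(Collectors.toList());
    }
}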
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache.
The class SSTableTxnWriter, method createRangeAware.
// log and writer closed during doPostCleanup
@SuppressWarnings("resource")
public static SSTableTxnWriter createRangeAware(TableMetadataRef metadata, long keyCount, long repairedAt, UUID pendingRepair, SSTableFormat.Type type, int sstableLevel, SerializationHeader header) {
    ColumnFamilyStore cfs = Keyspace.open(metadata.keyspace).getColumnFamilyStore(metadata.name);
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
    SSTableMultiWriter writer;
    try {
        writer = new RangeAwareSSTableWriter(cfs, keyCount, repairedAt, pendingRepair, type, sstableLevel, 0, txn, header);
    } catch (IOException e) {
        // we don't know the total size, so this should never happen as we send in 0
        throw new RuntimeException(e);
    }
    return new SSTableTxnWriter(txn, writer);
}
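LifecycleTransaction.offline creates a transaction that is not registered with any Tracker, which is why SSTableTxnWriter must commit or abort it itself in its own finish/abort paths. The sketch below shows that offline discipline in miniature; it is a hedged illustration, with a Runnable standing in for the real work of writing sstables against the transaction.

import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;

final class OfflineTxnSketch {
    // writeStep is a placeholder for building and tracking sstables against the txn.
    static void writeOffline(Runnable writeStep) {
        LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
        try {
            writeStep.run();
            txn.prepareToCommit();
            txn.commit();
        } catch (RuntimeException | Error e) {
            txn.abort(); // an offline transaction has no Tracker to clean up after it
            throw e;
        }
    }
}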