Use of org.apache.cassandra.db.compaction.CompactionInterruptedException in project cassandra by apache.
In the class IndexSummaryRedistribution, the method redistributeSummaries:
public List<SSTableReader> redistributeSummaries() throws IOException
{
    logger.info("Redistributing index summaries");
    List<SSTableReader> redistribute = new ArrayList<>();
    for (LifecycleTransaction txn : transactions.values())
    {
        redistribute.addAll(txn.originals());
    }

    long total = 0;
    for (SSTableReader sstable : Iterables.concat(compacting, redistribute))
        total += sstable.getIndexSummaryOffHeapSize();

    logger.trace("Beginning redistribution of index summaries for {} sstables with memory pool size {} MB; current spaced used is {} MB",
                 redistribute.size(), memoryPoolBytes / 1024L / 1024L, total / 1024.0 / 1024.0);

    final Map<SSTableReader, Double> readRates = new HashMap<>(redistribute.size());
    double totalReadsPerSec = 0.0;
    for (SSTableReader sstable : redistribute)
    {
        if (isStopRequested())
            throw new CompactionInterruptedException(getCompactionInfo());

        if (sstable.getReadMeter() != null)
        {
            Double readRate = sstable.getReadMeter().fifteenMinuteRate();
            totalReadsPerSec += readRate;
            readRates.put(sstable, readRate);
        }
    }
    logger.trace("Total reads/sec across all sstables in index summary resize process: {}", totalReadsPerSec);

    // copy and sort by read rates (ascending)
    List<SSTableReader> sstablesByHotness = new ArrayList<>(redistribute);
    Collections.sort(sstablesByHotness, new ReadRateComparator(readRates));

    long remainingBytes = memoryPoolBytes;
    for (SSTableReader sstable : compacting)
        remainingBytes -= sstable.getIndexSummaryOffHeapSize();

    logger.trace("Index summaries for compacting SSTables are using {} MB of space", (memoryPoolBytes - remainingBytes) / 1024.0 / 1024.0);
    List<SSTableReader> newSSTables = adjustSamplingLevels(sstablesByHotness, transactions, totalReadsPerSec, remainingBytes);

    for (LifecycleTransaction txn : transactions.values())
        txn.finish();

    total = 0;
    for (SSTableReader sstable : Iterables.concat(compacting, newSSTables))
        total += sstable.getIndexSummaryOffHeapSize();

    logger.trace("Completed resizing of index summaries; current approximate memory used: {}", FBUtilities.prettyPrintMemory(total));
    return newSSTables;
}
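Every example on this page repeats the same cooperative-cancellation idiom: the long-running loop polls isStopRequested() before each unit of work and, if a stop has been requested, throws CompactionInterruptedException carrying the task's CompactionInfo so the caller knows how far it got. The following self-contained sketch shows the shape of that idiom in plain Java; the names (InterruptibleIndexTask, InterruptedTaskException, processOne, progress) are hypothetical stand-ins for illustration, not Cassandra APIs.

import java.util.List;

// Hypothetical stand-ins for CompactionInfo.Holder / CompactionInterruptedException,
// shown only to illustrate the cancellation idiom; these are not Cassandra classes.
class InterruptedTaskException extends RuntimeException
{
    InterruptedTaskException(String progress)
    {
        super("stopped: " + progress);
    }
}

abstract class InterruptibleIndexTask
{
    // Set from another thread (e.g. an operator issuing a stop request).
    private volatile boolean stopRequested = false;

    public void stop()                  { stopRequested = true; }
    protected boolean isStopRequested() { return stopRequested; }

    // Subclasses describe how far they have got, analogous to getCompactionInfo().
    protected abstract String progress();

    protected abstract void processOne(String item);

    // The work loop checks the flag before each item, so a stop request is
    // honoured at the next item boundary rather than mid-item.
    public void run(List<String> items)
    {
        for (String item : items)
        {
            if (isStopRequested())
                throw new InterruptedTaskException(progress());
            processOne(item);
        }
    }
}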
Use of org.apache.cassandra.db.compaction.CompactionInterruptedException in project cassandra by apache.
In the class CollatedViewIndexBuilder, the method build:
public void build()
{
    try
    {
        int pageSize = cfs.indexManager.calculateIndexingPageSize();
        while (iter.hasNext())
        {
            if (isStopRequested())
                throw new CompactionInterruptedException(getCompactionInfo());
            DecoratedKey key = iter.next();
            cfs.indexManager.indexPartition(key, indexers, pageSize);
        }
    }
    finally
    {
        iter.close();
    }
}
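CompactionInterruptedException is unchecked, so it propagates straight out of build() to whoever submitted the task; the try/finally above only guarantees the partition iterator is closed on the way out. A caller that wants to treat a deliberate stop differently from a real failure can catch it explicitly. A minimal, hypothetical caller-side sketch (runBuild and logger are assumptions for illustration, not Cassandra APIs):

void runBuild(CollatedViewIndexBuilder builder)
{
    try
    {
        builder.build();
    }
    catch (CompactionInterruptedException e)
    {
        // A stop was requested while the build was running; treat it as a
        // cancellation rather than an error.
        logger.info("Index build cancelled: {}", e.getMessage());
    }
}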
Use of org.apache.cassandra.db.compaction.CompactionInterruptedException in project cassandra by apache.
In the class SASIIndexBuilder, the method build:
public void build()
{
    AbstractType<?> keyValidator = cfs.metadata().partitionKeyType;
    for (Map.Entry<SSTableReader, Map<ColumnMetadata, ColumnIndex>> e : sstables.entrySet())
    {
        SSTableReader sstable = e.getKey();
        Map<ColumnMetadata, ColumnIndex> indexes = e.getValue();

        try (RandomAccessReader dataFile = sstable.openDataReader())
        {
            PerSSTableIndexWriter indexWriter = SASIIndex.newWriter(keyValidator, sstable.descriptor, indexes, OperationType.COMPACTION);
            long previousKeyPosition = 0;
            try (KeyIterator keys = new KeyIterator(sstable.descriptor, cfs.metadata()))
            {
                while (keys.hasNext())
                {
                    if (isStopRequested())
                        throw new CompactionInterruptedException(getCompactionInfo());

                    final DecoratedKey key = keys.next();
                    final long keyPosition = keys.getKeyPosition();

                    indexWriter.startPartition(key, keyPosition);
                    try
                    {
                        RowIndexEntry indexEntry = sstable.getPosition(key, SSTableReader.Operator.EQ);
                        dataFile.seek(indexEntry.position);
                        // key
                        ByteBufferUtil.readWithShortLength(dataFile);

                        try (SSTableIdentityIterator partition = SSTableIdentityIterator.create(sstable, dataFile, key))
                        {
                            // if the row has statics attached, it has to be indexed separately
                            if (cfs.metadata().hasStaticColumns())
                                indexWriter.nextUnfilteredCluster(partition.staticRow());

                            while (partition.hasNext())
                                indexWriter.nextUnfilteredCluster(partition.next());
                        }
                    }
                    catch (IOException ex)
                    {
                        throw new FSReadError(ex, sstable.getFilename());
                    }

                    bytesProcessed += keyPosition - previousKeyPosition;
                    previousKeyPosition = keyPosition;
                }
                completeSSTable(indexWriter, sstable, indexes.values());
            }
        }
    }
}
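The bytesProcessed / previousKeyPosition bookkeeping at the bottom of the loop is what feeds this builder's progress report, which in turn is what CompactionInterruptedException carries when the build is stopped. Below is a small sketch of that byte-based progress tracking in isolation; ProgressTracker, advanceTo and completedRatio are hypothetical names, not part of Cassandra.

// Hypothetical helper showing the byte-based progress bookkeeping in isolation.
final class ProgressTracker
{
    private final long totalBytes;
    private long bytesProcessed;
    private long previousPosition;

    ProgressTracker(long totalBytes)
    {
        this.totalBytes = totalBytes;
    }

    // Called once per key with its position in the data file; the delta from the
    // previous position approximates the size of the partition just processed.
    void advanceTo(long keyPosition)
    {
        bytesProcessed += keyPosition - previousPosition;
        previousPosition = keyPosition;
    }

    double completedRatio()
    {
        return totalBytes == 0 ? 1.0 : (double) bytesProcessed / totalBytes;
    }
}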
Use of org.apache.cassandra.db.compaction.CompactionInterruptedException in project eiger by wlloyd.
In the class SecondaryIndexBuilder, the method build:
public void build()
{
    while (iter.hasNext())
    {
        if (isStopped())
            throw new CompactionInterruptedException(getCompactionInfo());
        DecoratedKey<?> key = iter.next();
        Table.indexRow(key, cfs, columns);
    }

    try
    {
        iter.close();
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
}
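This older eiger variant differs from the builders above in two ways: the stop check is called isStopped() rather than isStopRequested(), and iter.close() sits after the loop instead of in a finally block, so the iterator is never closed when CompactionInterruptedException (or indexing itself) throws. Here is a sketch of the same method with the close moved into finally; it assumes a FileUtils.closeQuietly(iter) helper is available in that codebase, so treat it as illustrative rather than a drop-in patch.

public void build()
{
    try
    {
        while (iter.hasNext())
        {
            if (isStopped())
                throw new CompactionInterruptedException(getCompactionInfo());
            DecoratedKey<?> key = iter.next();
            Table.indexRow(key, cfs, columns);
        }
    }
    finally
    {
        // Close the iterator even when the build is interrupted or fails.
        FileUtils.closeQuietly(iter);
    }
}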
Use of org.apache.cassandra.db.compaction.CompactionInterruptedException in project cassandra by apache.
In the class IndexSummaryManagerTest, the method testCancelIndex:
@Test
public void testCancelIndex() throws Exception
{
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    final int numSSTables = 4;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);

    final List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables)
        sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));

    final long singleSummaryOffHeapSpace = sstables.get(0).getIndexSummaryOffHeapSize();

    // everything should get cut in half
    final AtomicReference<CompactionInterruptedException> exception = new AtomicReference<>();
    // barrier to control when redistribution runs
    final CountDownLatch barrier = new CountDownLatch(1);

    Thread t = NamedThreadFactory.createThread(new Runnable()
    {
        public void run()
        {
            try
            {
                // Don't leave enough space for even the minimal index summaries
                try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN))
                {
                    IndexSummaryManager.redistributeSummaries(new ObservableRedistribution(Collections.EMPTY_LIST,
                                                                                           of(cfs.metadata.id, txn),
                                                                                           singleSummaryOffHeapSpace,
                                                                                           barrier));
                }
            }
            catch (CompactionInterruptedException ex)
            {
                exception.set(ex);
            }
            catch (IOException ignored)
            {
            }
        }
    });
    t.start();

    while (CompactionManager.instance.getActiveCompactions() == 0 && t.isAlive())
        Thread.sleep(1);

    // to ensure that the stop condition check in IndexSummaryRedistribution::redistributeSummaries
    // is made *after* the halt request is made to the CompactionManager, don't allow the redistribution
    // to proceed until stopCompaction has been called.
    CompactionManager.instance.stopCompaction("INDEX_SUMMARY");
    // allows the redistribution to proceed
    barrier.countDown();
    t.join();

    assertNotNull("Expected compaction interrupted exception", exception.get());
    assertTrue("Expected no active compactions", CompactionMetrics.getCompactions().isEmpty());

    Set<SSTableReader> beforeRedistributionSSTables = new HashSet<>(sstables);
    Set<SSTableReader> afterCancelSSTables = new HashSet<>(cfs.getLiveSSTables());
    Set<SSTableReader> disjoint = Sets.symmetricDifference(beforeRedistributionSSTables, afterCancelSSTables);
    assertTrue(String.format("Mismatched files before and after cancelling redistribution: %s", Joiner.on(",").join(disjoint)),
               disjoint.isEmpty());

    validateData(cfs, numRows);
}
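ObservableRedistribution is a helper defined elsewhere in the test class; its job is to block on the latch so that stopCompaction is guaranteed to have been issued before the first isStopRequested() check inside redistributeSummaries. Its definition is not shown on this page, so the following is only a plausible sketch of that gate: the four-argument constructor matches the call site above, while the three-argument super(...) call is an assumption about IndexSummaryRedistribution's constructor.

// Assumed shape of the test helper; the real class may differ.
static class ObservableRedistribution extends IndexSummaryRedistribution
{
    private final CountDownLatch barrier;

    ObservableRedistribution(List<SSTableReader> compacting,
                             Map<TableId, LifecycleTransaction> transactions,
                             long memoryPoolBytes,
                             CountDownLatch barrier)
    {
        super(compacting, transactions, memoryPoolBytes);
        this.barrier = barrier;
    }

    @Override
    public List<SSTableReader> redistributeSummaries() throws IOException
    {
        try
        {
            // Hold here until the test has issued its stop request.
            barrier.await();
        }
        catch (InterruptedException e)
        {
            throw new RuntimeException("Interrupted while waiting for the test barrier", e);
        }
        return super.redistributeSummaries();
    }
}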