Use of org.apache.cassandra.metrics.RestorableMeter in project cassandra by apache.
In class SizeTieredCompactionStrategyTest, method testPrepBucket.
@Test
public void testPrepBucket() throws Exception {
    String ksname = KEYSPACE1;
    String cfname = "Standard1";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        String key = String.valueOf(r);
        new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    Pair<List<SSTableReader>, Double> bucket;
    List<SSTableReader> interestingBucket = mostInterestingBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32);
    assertTrue("nothing should be returned when all buckets are below the min threshold", interestingBucket.isEmpty());
    sstrs.get(0).overrideReadMeter(new RestorableMeter(100.0, 100.0));
    sstrs.get(1).overrideReadMeter(new RestorableMeter(200.0, 200.0));
    sstrs.get(2).overrideReadMeter(new RestorableMeter(300.0, 300.0));
    long estimatedKeys = sstrs.get(0).estimatedKeys();
    // if we have more than the max threshold, the coldest sstable should be dropped
    bucket = trimToThresholdWithHotness(sstrs, 2);
    assertEquals("one sstable should have been dropped", 2, bucket.left.size());
    double expectedBucketHotness = (200.0 + 300.0) / estimatedKeys;
    assertEquals(String.format("bucket hotness (%f) should be close to %f", bucket.right, expectedBucketHotness), expectedBucketHotness, bucket.right, 1.0);
}
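The expected hotness above follows directly from the rates fed into each RestorableMeter: the coldest sstable (rate 100) is trimmed away, and the remaining rates are divided by the estimated key count. A minimal, self-contained sketch of that arithmetic, assuming hotness = twoHourRate / estimatedKeys (which matches the expected value the test computes); the key count here is illustrative, since the test reads the real value from the sstable:

// Self-contained sketch of the hotness arithmetic asserted above.
// Assumption: bucket hotness = sum over sstables of twoHourRate / estimatedKeys.
public class BucketHotnessSketch {
    public static void main(String[] args) {
        double[] twoHourRates = { 200.0, 300.0 }; // the two hottest sstables kept by the trim
        long estimatedKeys = 1;                   // illustrative; the test reads this from the sstable
        double bucketHotness = 0.0;
        for (double rate : twoHourRates)
            bucketHotness += rate / estimatedKeys;
        System.out.printf("bucket hotness: %f%n", bucketHotness);
    }
}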
Use of org.apache.cassandra.metrics.RestorableMeter in project cassandra by apache.
In class IndexSummaryManagerTest, method testPauseIndexSummaryManager.
@Test
public void testPauseIndexSummaryManager() throws Exception {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    int numSSTables = 4;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables)
        sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    long singleSummaryOffHeapSpace = sstables.get(0).getIndexSummaryOffHeapSize();
    // everything should get cut in half
    assert sstables.size() == numSSTables;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        try (AutoCloseable toresume = CompactionManager.instance.pauseGlobalCompaction()) {
            sstables = redistributeSummaries(Collections.emptyList(), of(cfs.metadata().id, txn), (singleSummaryOffHeapSpace * (numSSTables / 2)));
            fail("The redistribution should fail - we got paused before adding to active compactions, but after marking compacting");
        }
    } catch (CompactionInterruptedException e) {
        // expected
    }
    for (SSTableReader sstable : sstables)
        assertEquals(BASE_SAMPLING_LEVEL, sstable.getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    assertOnDiskState(cfs, numSSTables);
}
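Note that pauseGlobalCompaction() returns an AutoCloseable, so the try-with-resources block guarantees compactions resume even though the test exits the block via the expected exception. A minimal, self-contained sketch of that pause/resume idiom (the names here are illustrative, not Cassandra's API):

// Sketch of the AutoCloseable "pause guard" idiom used above:
// pause on construction, resume automatically when the try block exits.
import java.util.concurrent.atomic.AtomicBoolean;

public class PauseGuardSketch {
    static final AtomicBoolean paused = new AtomicBoolean(false);

    static AutoCloseable pause() {
        paused.set(true);
        return () -> paused.set(false); // resume runs even if the block throws
    }

    public static void main(String[] args) throws Exception {
        try (AutoCloseable resume = pause()) {
            System.out.println("paused: " + paused.get()); // true
        }
        System.out.println("paused: " + paused.get()); // false
    }
}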
Use of org.apache.cassandra.metrics.RestorableMeter in project cassandra by apache.
In class IndexSummaryManagerTest, method testChangeMinIndexInterval.
@Test
public void testChangeMinIndexInterval() throws IOException {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    int numSSTables = 1;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables)
        sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    for (SSTableReader sstable : sstables)
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
    int originalMinIndexInterval = cfs.metadata().params.minIndexInterval;
    // double the min_index_interval
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build(), true);
    IndexSummaryManager.instance.redistributeSummaries();
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        assertEquals(numRows / cfs.metadata().params.minIndexInterval, sstable.getIndexSummarySize());
    }
    // return min_index_interval to its original value
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
    IndexSummaryManager.instance.redistributeSummaries();
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        assertEquals(numRows / cfs.metadata().params.minIndexInterval, sstable.getIndexSummarySize());
    }
    // halve the min_index_interval, but constrain the available space to exactly what we have now;
    // as a result, the summary shouldn't change
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval / 2).build(), true);
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    long summarySpace = sstable.getIndexSummaryOffHeapSize();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), summarySpace);
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(originalMinIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
    assertEquals(numRows / originalMinIndexInterval, sstable.getIndexSummarySize());
    // keep the min_index_interval the same, but now give the summary enough space to grow by 50%
    double previousInterval = sstable.getEffectiveIndexInterval();
    int previousSize = sstable.getIndexSummarySize();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (long) Math.ceil(summarySpace * 1.5));
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(previousSize * 1.5, (double) sstable.getIndexSummarySize(), 1);
    assertEquals(previousInterval * (1.0 / 1.5), sstable.getEffectiveIndexInterval(), 0.001);
    // return min_index_interval to its original value (double it), but only give the summary enough space
    // to have an effective index interval of twice the new min
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (long) Math.ceil(summarySpace / 2.0));
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(originalMinIndexInterval * 2, sstable.getEffectiveIndexInterval(), 0.001);
    assertEquals(numRows / (originalMinIndexInterval * 2), sstable.getIndexSummarySize());
    // raise the min_index_interval above our current effective interval, but set the max_index_interval lower
    // than what we actually have space for (meaning the index summary would ideally be smaller, but this would
    // result in an effective interval above the new max)
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 4).build(), true);
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(originalMinIndexInterval * 4).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 10);
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
}
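Every summary-size assertion in this test reduces to the same integer arithmetic: the index summary holds roughly one entry per effectiveIndexInterval rows, so its size is numRows / interval. A self-contained sketch of the numbers exercised above (256 rows, min index interval of 8 per the comment in the test):

// Illustrative arithmetic behind the getIndexSummarySize() assertions above:
// summary size = numRows / effectiveIndexInterval.
public class IntervalMathSketch {
    public static void main(String[] args) {
        int numRows = 256;
        int minIndexInterval = 8;                             // "index interval of 8" per the test
        System.out.println(numRows / minIndexInterval);       // 32 entries at the original interval
        System.out.println(numRows / (minIndexInterval * 2)); // 16 entries after doubling it
    }
}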
Use of org.apache.cassandra.metrics.RestorableMeter in project cassandra by apache.
In class IndexSummaryRedistributionTest, method testMetricsLoadAfterRedistribution.
@Test
public void testMetricsLoadAfterRedistribution() throws IOException {
    String ksname = KEYSPACE1;
    String cfname = CF_STANDARD;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    int numSSTables = 1;
    int numRows = 1024 * 10;
    long load = StorageMetrics.load.getCount();
    // reset the load metric
    StorageMetrics.load.dec(load);
    createSSTables(ksname, cfname, numSSTables, numRows);
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables)
        sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    long oldSize = 0;
    for (SSTableReader sstable : sstables) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        oldSize += sstable.bytesOnDisk();
    }
    load = StorageMetrics.load.getCount();
    // size of other sstables, e.g. schema and other system sstables
    long others = load - oldSize;
    int originalMinIndexInterval = cfs.metadata().params.minIndexInterval;
    // double the min_index_interval
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build(), true);
    IndexSummaryManager.instance.redistributeSummaries();
    long newSize = 0;
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        assertEquals(numRows / cfs.metadata().params.minIndexInterval, sstable.getIndexSummarySize());
        newSize += sstable.bytesOnDisk();
    }
    newSize += others;
    load = StorageMetrics.load.getCount();
    // the size we calculate should be almost the same as the load in the metrics
    assertEquals(newSize, load, newSize / 10);
}
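The final assertion allows 10% slack because the load metric also counts sstables this test does not control (schema and other system tables), which the "others" term approximates. A self-contained sketch of that accounting, with illustrative byte counts in place of real metric readings:

// Sketch of the load accounting checked above:
// load metric ~= bytesOnDisk of this table's sstables + everything else.
public class LoadAccountingSketch {
    public static void main(String[] args) {
        long loadBefore = 1_200_000;        // illustrative metric reading before redistribution
        long oldSize = 1_000_000;           // illustrative bytesOnDisk for this table before
        long others = loadBefore - oldSize; // schema/system sstables the metric also counts
        long newSize = 990_000;             // illustrative bytesOnDisk after the summary shrinks
        long expected = newSize + others;
        long loadAfter = 1_190_000;         // illustrative metric reading after redistribution
        System.out.println(Math.abs(expected - loadAfter) <= expected / 10); // the 10% tolerance
    }
}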
Use of org.apache.cassandra.metrics.RestorableMeter in project cassandra by apache.
In class IndexSummaryManagerTest, method testCancelIndex.
@Test
public void testCancelIndex() throws Exception {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    final int numSSTables = 4;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    final List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables)
        sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    final long singleSummaryOffHeapSpace = sstables.get(0).getIndexSummaryOffHeapSize();
    // everything should get cut in half
    final AtomicReference<CompactionInterruptedException> exception = new AtomicReference<>();
    // barrier to control when redistribution runs
    final CountDownLatch barrier = new CountDownLatch(1);
    Thread t = NamedThreadFactory.createThread(new Runnable() {
        public void run() {
            try {
                // don't leave enough space for even the minimal index summaries
                try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
                    IndexSummaryManager.redistributeSummaries(new ObservableRedistribution(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), singleSummaryOffHeapSpace, barrier));
                }
            } catch (CompactionInterruptedException ex) {
                exception.set(ex);
            } catch (IOException ignored) {
            }
        }
    });
    t.start();
    while (CompactionManager.instance.getActiveCompactions() == 0 && t.isAlive())
        Thread.sleep(1);
    // to ensure that the stop condition check in IndexSummaryRedistribution::redistributeSummaries
    // is made *after* the halt request is made to the CompactionManager, don't allow the redistribution
    // to proceed until stopCompaction has been called
    CompactionManager.instance.stopCompaction("INDEX_SUMMARY");
    // allow the redistribution to proceed
    barrier.countDown();
    t.join();
    assertNotNull("Expected compaction interrupted exception", exception.get());
    assertTrue("Expected no active compactions", CompactionMetrics.getCompactions().isEmpty());
    Set<SSTableReader> beforeRedistributionSSTables = new HashSet<>(sstables);
    Set<SSTableReader> afterCancelSSTables = new HashSet<>(cfs.getLiveSSTables());
    Set<SSTableReader> disjoint = Sets.symmetricDifference(beforeRedistributionSSTables, afterCancelSSTables);
    assertTrue(String.format("Mismatched files before and after cancelling redistribution: %s", Joiner.on(",").join(disjoint)), disjoint.isEmpty());
    validateData(cfs, numRows);
}
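The CountDownLatch orders two events across threads: the redistribution worker blocks on the latch until the main thread has issued stopCompaction, which guarantees the stop-condition check inside the redistribution observes the halt request. A minimal, self-contained sketch of the same handshake (names illustrative; the print stands in for stopCompaction):

// Sketch of the latch handshake used above: the worker cannot pass the
// barrier until the main thread has put the stop request in place.
import java.util.concurrent.CountDownLatch;

public class StopHandshakeSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch barrier = new CountDownLatch(1);
        Thread worker = new Thread(() -> {
            try {
                barrier.await(); // hold here until the stop request exists
                System.out.println("proceeding; the stop request is guaranteed visible");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        System.out.println("issuing stop request"); // stands in for stopCompaction(...)
        barrier.countDown();                        // only now may the worker proceed
        worker.join();
    }
}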