Example use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class AntiCompactionTest, method shouldMutate.
/**
 * Anticompacts every unrepaired sstable in a freshly prepared store over a range that
 * covers all keys, then verifies the single surviving sstable carries the requested
 * repairedAt / pendingRepair metadata, holds exactly one global reference, and that
 * the tracker no longer reports any compacting sstables.
 */
private void shouldMutate(long repairedAt, UUID pendingRepair) throws InterruptedException, IOException {
    ColumnFamilyStore store = prepareColumnFamilyStore();
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    // Range spanning "0".."9999" covers every generated partition key.
    Range<Token> fullRange = new Range<Token>(new BytesToken("0".getBytes()), new BytesToken("9999".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(fullRange);
    UUID parentRepairSession = UUID.randomUUID();
    // Both the transaction and the refs are released by try-with-resources even if
    // the anticompaction throws.
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, pendingRepair, parentRepairSession);
    }
    assertThat(store.getLiveSSTables().size(), is(1));
    SSTableReader survivor = Iterables.get(store.getLiveSSTables(), 0);
    assertThat(survivor.isRepaired(), is(repairedAt != UNREPAIRED_SSTABLE));
    assertThat(survivor.isPendingRepair(), is(pendingRepair != NO_PENDING_REPAIR));
    assertThat(survivor.selfRef().globalCount(), is(1));
    assertThat(store.getTracker().getCompacting().size(), is(0));
}
Example use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class AntiCompactionTest, method antiCompactTen.
/**
 * Generates ten sstables, anticompacts them against the key range ["0", "4"], and
 * verifies that (a) the sstable count is unchanged, (b) every key inside the range
 * ended up in a repaired sstable stamped with the expected repairedAt time, and
 * (c) every key outside the range ended up in an unrepaired sstable.
 *
 * @param compactionStrategy present for parity with sibling tests; the strategy is
 *                           not consulted here — TODO confirm whether callers rely on it
 */
public void antiCompactTen(String compactionStrategy) throws InterruptedException, IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.disableAutoCompaction();
    for (int table = 0; table < 10; table++) {
        generateSStable(store, Integer.toString(table));
    }
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    // Only keys in ["0", "4"] fall inside the anticompacted (repaired) range.
    Range<Token> range = new Range<Token>(new BytesToken("0".getBytes()), new BytesToken("4".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    long repairedAt = 1000;
    UUID parentRepairSession = UUID.randomUUID();
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, NO_PENDING_REPAIR, parentRepairSession);
    }
    /*
    Anticompaction will be anti-compacting 10 SSTables but will be doing this two at a time
    so there will be no net change in the number of sstables
    */
    assertEquals(10, store.getLiveSSTables().size());
    int repairedKeys = 0;
    int nonRepairedKeys = 0;
    // Scan every surviving sstable and check that each partition landed on the
    // correct side of the repaired/unrepaired split.
    for (SSTableReader sstable : store.getLiveSSTables()) {
        try (ISSTableScanner scanner = sstable.getScanner()) {
            while (scanner.hasNext()) {
                try (UnfilteredRowIterator row = scanner.next()) {
                    if (sstable.isRepaired()) {
                        assertTrue(range.contains(row.partitionKey().getToken()));
                        assertEquals(repairedAt, sstable.getSSTableMetadata().repairedAt);
                        repairedKeys++;
                    } else {
                        assertFalse(range.contains(row.partitionKey().getToken()));
                        assertEquals(ActiveRepairService.UNREPAIRED_SSTABLE, sstable.getSSTableMetadata().repairedAt);
                        nonRepairedKeys++;
                    }
                }
            }
        }
    }
    // JUnit's assertEquals takes (expected, actual); the original had them reversed,
    // which produces misleading failure messages.
    assertEquals(40, repairedKeys);
    assertEquals(60, nonRepairedKeys);
}
Example use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class CompactionAwareWriterTest, method testDefaultCompactionWriter.
/**
 * Compacts 1000 rows through a DefaultCompactionWriter and verifies that the result
 * is a single sstable containing every row with the same on-disk size as the input.
 * The transaction is committed (and thus closed) by {@code compact}, so it is not
 * wrapped in try-with-resources here.
 */
@Test
public void testDefaultCompactionWriter() throws Throwable {
    Keyspace ks = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(TABLE);
    cfs.disableAutoCompaction();
    int rowCount = 1000;
    populate(rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    // Remember the input's on-disk footprint so we can assert the rewrite is size-neutral.
    long originalOnDiskSize = txn.originals().iterator().next().onDiskLength();
    CompactionAwareWriter compactionWriter = new DefaultCompactionWriter(cfs, cfs.getDirectories(), txn, txn.originals());
    int rowsWritten = compact(cfs, txn, compactionWriter);
    assertEquals(1, cfs.getLiveSSTables().size());
    assertEquals(rowCount, rowsWritten);
    assertEquals(originalOnDiskSize, cfs.getLiveSSTables().iterator().next().onDiskLength());
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
Example use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class IndexSummaryManagerTest, method testRedistributeSummaries.
/**
 * Exercises index-summary redistribution across four sstables under a sequence of
 * memory budgets and read-rate profiles: full budget (no downsampling), half and
 * quarter budgets (uniform downsampling), upsampling back, hot/cold splits, a mixed
 * upsample/downsample scenario, and finally a budget too small for even minimal
 * summaries. Data is validated after every redistribution.
 */
@Test(timeout = 10000)
public void testRedistributeSummaries() throws IOException {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    int numSSTables = 4;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    int minSamplingLevel = (BASE_SAMPLING_LEVEL * cfs.metadata().params.minIndexInterval) / cfs.metadata().params.maxIndexInterval;
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables) sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    long singleSummaryOffHeapSpace = sstables.get(0).getIndexSummaryOffHeapSize();
    // there should be enough space to not downsample anything
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * numSSTables));
    }
    for (SSTableReader sstable : sstables) assertEquals(BASE_SAMPLING_LEVEL, sstable.getIndexSummarySamplingLevel());
    assertEquals(singleSummaryOffHeapSpace * numSSTables, totalOffHeapSize(sstables));
    validateData(cfs, numRows);
    // everything should get cut in half
    assert sstables.size() == 4;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * (numSSTables / 2)));
    }
    for (SSTableReader sstable : sstables) assertEquals(BASE_SAMPLING_LEVEL / 2, sstable.getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // everything should get cut to a quarter
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * (numSSTables / 4)));
    }
    for (SSTableReader sstable : sstables) assertEquals(BASE_SAMPLING_LEVEL / 4, sstable.getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // upsample back up to half (the +4 gives a little slack over the exact half budget)
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * (numSSTables / 2) + 4));
    }
    assert sstables.size() == 4;
    for (SSTableReader sstable : sstables) assertEquals(BASE_SAMPLING_LEVEL / 2, sstable.getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // upsample back up to the original index summary
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * numSSTables));
    }
    for (SSTableReader sstable : sstables) assertEquals(BASE_SAMPLING_LEVEL, sstable.getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // make two of the four sstables cold, only leave enough space for three full index summaries,
    // so the two cold sstables should get downsampled to be half of their original size
    sstables.get(0).overrideReadMeter(new RestorableMeter(50.0, 50.0));
    sstables.get(1).overrideReadMeter(new RestorableMeter(50.0, 50.0));
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * 3));
    }
    Collections.sort(sstables, hotnessComparator);
    assertEquals(BASE_SAMPLING_LEVEL / 2, sstables.get(0).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL / 2, sstables.get(1).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL, sstables.get(2).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL, sstables.get(3).getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // small increases or decreases in the read rate don't result in downsampling or upsampling
    double lowerRate = 50.0 * (DOWNSAMPLE_THESHOLD + (DOWNSAMPLE_THESHOLD * 0.10));
    double higherRate = 50.0 * (UPSAMPLE_THRESHOLD - (UPSAMPLE_THRESHOLD * 0.10));
    sstables.get(0).overrideReadMeter(new RestorableMeter(lowerRate, lowerRate));
    sstables.get(1).overrideReadMeter(new RestorableMeter(higherRate, higherRate));
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * 3));
    }
    Collections.sort(sstables, hotnessComparator);
    assertEquals(BASE_SAMPLING_LEVEL / 2, sstables.get(0).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL / 2, sstables.get(1).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL, sstables.get(2).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL, sstables.get(3).getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // reset, and then this time, leave enough space for one of the cold sstables to not get downsampled
    sstables = resetSummaries(cfs, sstables, singleSummaryOffHeapSpace);
    sstables.get(0).overrideReadMeter(new RestorableMeter(1.0, 1.0));
    sstables.get(1).overrideReadMeter(new RestorableMeter(2.0, 2.0));
    sstables.get(2).overrideReadMeter(new RestorableMeter(1000.0, 1000.0));
    sstables.get(3).overrideReadMeter(new RestorableMeter(1000.0, 1000.0));
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (singleSummaryOffHeapSpace * 3) + 50);
    }
    Collections.sort(sstables, hotnessComparator);
    // one of the two coldest sstables is downsampled to the min level; the other keeps a full summary
    if (sstables.get(0).getIndexSummarySamplingLevel() == minSamplingLevel)
        assertEquals(BASE_SAMPLING_LEVEL, sstables.get(1).getIndexSummarySamplingLevel());
    else
        assertEquals(BASE_SAMPLING_LEVEL, sstables.get(0).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL, sstables.get(2).getIndexSummarySamplingLevel());
    assertEquals(BASE_SAMPLING_LEVEL, sstables.get(3).getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // Cause a mix of upsampling and downsampling. We'll leave enough space for two full index summaries. The two
    // coldest sstables will get downsampled to 4/128 of their size, leaving us with 1 and 92/128th index
    // summaries worth of space. The hottest sstable should get a full index summary, and the one in the middle
    // should get the remainder.
    sstables.get(0).overrideReadMeter(new RestorableMeter(0.0, 0.0));
    sstables.get(1).overrideReadMeter(new RestorableMeter(0.0, 0.0));
    sstables.get(2).overrideReadMeter(new RestorableMeter(92, 92));
    sstables.get(3).overrideReadMeter(new RestorableMeter(128.0, 128.0));
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), (long) (singleSummaryOffHeapSpace + (singleSummaryOffHeapSpace * (92.0 / BASE_SAMPLING_LEVEL))));
    }
    Collections.sort(sstables, hotnessComparator);
    // at the min sampling level
    assertEquals(1, sstables.get(0).getIndexSummarySize());
    // at the min sampling level — the original checked get(0) twice (copy-paste);
    // both of the two coldest sstables must be at the minimum
    assertEquals(1, sstables.get(1).getIndexSummarySize());
    assertTrue(sstables.get(2).getIndexSummarySamplingLevel() > minSamplingLevel);
    assertTrue(sstables.get(2).getIndexSummarySamplingLevel() < BASE_SAMPLING_LEVEL);
    assertEquals(BASE_SAMPLING_LEVEL, sstables.get(3).getIndexSummarySamplingLevel());
    validateData(cfs, numRows);
    // Don't leave enough space for even the minimal index summaries
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        sstables = redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 10);
    }
    for (SSTableReader sstable : sstables) // at the min sampling level
        assertEquals(1, sstable.getIndexSummarySize());
    validateData(cfs, numRows);
}
Example use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class IndexSummaryManagerTest, method testChangeMaxIndexInterval.
/**
 * Verifies that index-summary redistribution respects changes to max_index_interval:
 * a tight budget forces summaries to the max interval; halving max_index_interval via
 * a schema update tightens the effective interval accordingly; restoring the original
 * value relaxes it again.
 */
@Test
public void testChangeMaxIndexInterval() throws IOException {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    int numSSTables = 1;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader reader : sstables) {
        reader.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    }
    // A 10-byte budget forces maximal downsampling: every sstable ends up at max_index_interval.
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 10);
    }
    sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader reader : sstables) {
        assertEquals(cfs.metadata().params.maxIndexInterval, reader.getEffectiveIndexInterval(), 0.01);
    }
    // halve the max_index_interval
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(cfs.metadata().params.maxIndexInterval / 2).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 1);
    }
    sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader reader : sstables) {
        assertEquals(cfs.metadata().params.maxIndexInterval, reader.getEffectiveIndexInterval(), 0.01);
        assertEquals(numRows / cfs.metadata().params.maxIndexInterval, reader.getIndexSummarySize());
    }
    // return max_index_interval to its original value
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(cfs.metadata().params.maxIndexInterval * 2).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), 1);
    }
    for (SSTableReader reader : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.maxIndexInterval, reader.getEffectiveIndexInterval(), 0.01);
        assertEquals(numRows / cfs.metadata().params.maxIndexInterval, reader.getIndexSummarySize());
    }
}
Aggregations