Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache: class ScrubTest, method testScrubCorruptedCounterRow.
@Test
public void testScrubCorruptedCounterRow() throws IOException, WriteTimeoutException {
    // When compression is enabled, for testing corrupted chunks we need enough partitions to cover
    // at least 3 chunks of size COMPRESSION_CHUNK_LENGTH
    int numPartitions = 1000;
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    cfs.clearUnsafe();
    fillCounterCF(cfs, numPartitions);
    assertOrderedAll(cfs, numPartitions);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    // make sure to override at most 1 chunk when compression is enabled
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));
    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, false, true)) {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    } catch (IOError err) {
        // expected: the corruption surfaces as an IOError wrapping a CorruptSSTableException
    }
    // with skipCorrupted == true, the corrupt rows will be skipped
    Scrubber.ScrubResult scrubResult;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, true, true)) {
        scrubResult = scrubber.scrubWithResult();
    }
    assertNotNull(scrubResult);
    boolean compression = Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false"));
    if (compression) {
        assertEquals(0, scrubResult.emptyRows);
        assertEquals(numPartitions, scrubResult.badRows + scrubResult.goodRows);
        // because we only corrupted 1 chunk and we chose enough partitions to cover at least 3 chunks
        assertTrue(scrubResult.goodRows >= scrubResult.badRows * 2);
    } else {
        assertEquals(0, scrubResult.emptyRows);
        assertEquals(1, scrubResult.badRows);
        assertEquals(numPartitions - 1, scrubResult.goodRows);
    }
    assertEquals(1, cfs.getLiveSSTables().size());
    assertOrderedAll(cfs, scrubResult.goodRows);
}
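Both passes obtain the LifecycleTransaction through Tracker.tryModify, which marks the sstable as compacting for the duration of the scrub. The tests can assume the call succeeds, but tryModify returns null when any requested sstable is already involved in another operation, so a non-test caller needs a guard along these lines (a minimal sketch; the exception and its message are illustrative, not project code):

// Sketch only: guard against the sstable already being marked compacting.
try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB)) {
    if (txn == null)
        throw new IllegalStateException("sstable is busy; cannot scrub it right now");
    try (Scrubber scrubber = new Scrubber(cfs, txn, true, true)) {
        scrubber.scrub();
    }
}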
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache: class ScrubTest, method testScrubCorruptedRowInSmallFile.
@Test
public void testScrubCorruptedRowInSmallFile() throws IOException, WriteTimeoutException {
    // cannot test this with compression
    assumeTrue(!Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false")));
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    cfs.clearUnsafe();
    fillCounterCF(cfs, 2);
    assertOrderedAll(cfs, 2);
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    // overwrite one row with garbage
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));
    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, false, true)) {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    } catch (IOError err) {
        // expected: the corruption surfaces as an IOError wrapping a CorruptSSTableException
    }
    // with skipCorrupted == true, the corrupt row will be skipped
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, true, true)) {
        scrubber.scrub();
        scrubber.close();
    }
    assertEquals(1, cfs.getLiveSSTables().size());
    // verify that we can read all of the rows, and there is now one less row
    assertOrderedAll(cfs, 1);
}
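overrideWithGarbage is a ScrubTest helper that is not reproduced in this listing; it corrupts the on-disk data between the positions of the two keys. Roughly, and using only JDK I/O (the positions below are placeholders; the real helper derives them from the sstable's index, or from compression chunk boundaries when compression is enabled):

// Sketch: clobber a byte range of the data file so the scrubber hits a corrupt partition.
long startPosition = 0;   // illustrative; computed from the index entry of the first key
long endPosition = 100;   // illustrative; computed from the index entry of the second key
try (RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw")) {
    file.seek(startPosition);
    for (long i = startPosition; i < endPosition; i++)
        file.writeByte('z');
}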
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache: class CompactionAwareWriterTest, method testMajorLeveledCompactionWriter.
@Test
public void testMajorLeveledCompactionWriter() throws Throwable {
    ColumnFamilyStore cfs = getColumnFamilyStore();
    cfs.disableAutoCompaction();
    int rowCount = 20000;
    int targetSSTableCount = 50;
    populate(rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    int sstableSize = (int) beforeSize / targetSSTableCount;
    CompactionAwareWriter writer = new MajorLeveledCompactionWriter(cfs, cfs.getDirectories(), txn, txn.originals(), sstableSize);
    int rows = compact(cfs, txn, writer);
    assertEquals(targetSSTableCount, cfs.getLiveSSTables().size());
    int[] levelCounts = new int[5];
    assertEquals(rowCount, rows);
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        levelCounts[sstable.getSSTableLevel()]++;
    }
    assertEquals(0, levelCounts[0]);
    assertEquals(10, levelCounts[1]);
    // note that if we want more levels, fix this
    assertEquals(targetSSTableCount - 10, levelCounts[2]);
    for (int i = 3; i < levelCounts.length; i++)
        assertEquals(0, levelCounts[i]);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
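The compact(cfs, txn, writer) helper lives elsewhere in CompactionAwareWriterTest. Its general shape is sketched below, assuming the usual scanner/controller/CompactionIterator pattern used by this code base's compaction tests; constructor argument order can differ between Cassandra versions, so treat it as an outline rather than a drop-in method:

// Sketch of the test helper: stream every partition of the transaction's sstables
// through the CompactionAwareWriter, then commit the transaction via finish().
private static int compact(ColumnFamilyStore cfs, LifecycleTransaction txn, CompactionAwareWriter writer) {
    int rowsWritten = 0;
    int nowInSec = FBUtilities.nowInSeconds();
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
         CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners.scanners, controller, nowInSec, UUID.randomUUID())) {
        while (ci.hasNext()) {
            if (writer.append(ci.next()))
                rowsWritten++;
        }
    }
    writer.finish();
    return rowsWritten;
}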
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache: class CompactionAwareWriterTest, method testMaxSSTableSizeWriter.
@Test
public void testMaxSSTableSizeWriter() throws Throwable {
    ColumnFamilyStore cfs = getColumnFamilyStore();
    cfs.disableAutoCompaction();
    int rowCount = 1000;
    populate(rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    int sstableSize = (int) beforeSize / 10;
    CompactionAwareWriter writer = new MaxSSTableSizeWriter(cfs, cfs.getDirectories(), txn, txn.originals(), sstableSize, 0);
    int rows = compact(cfs, txn, writer);
    assertEquals(10, cfs.getLiveSSTables().size());
    assertEquals(rowCount, rows);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
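In both writer tests the CompactionAwareWriter takes ownership of the LifecycleTransaction: finish() (called inside the compact helper) commits it and publishes the new sstables. The tests simply let any exception propagate to JUnit, but production-style code would abort the writer so the original sstables stay live and temporary files are cleaned up. A minimal sketch of that error path, assuming the Transactional abort(Throwable) contract:

// Sketch: abort the writer, and with it the transaction, if compaction fails.
CompactionAwareWriter writer = new MaxSSTableSizeWriter(cfs, cfs.getDirectories(), txn, txn.originals(), sstableSize, 0);
try {
    compact(cfs, txn, writer);   // the helper calls writer.finish() on success
} catch (Throwable t) {
    writer.abort(t);
    throw t;
}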
Use of org.apache.cassandra.db.lifecycle.LifecycleTransaction in project cassandra by apache: class AntiCompactionTest, method antiCompactionSizeTest.
@Ignore
@Test
public void antiCompactionSizeTest() throws InterruptedException, IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Range<Token> range = new Range<Token>(new BytesToken(ByteBufferUtil.bytes(0)), new BytesToken(ByteBufferUtil.bytes(500)));
    Collection<SSTableReader> sstables = cfs.getLiveSSTables();
    UUID parentRepairSession = UUID.randomUUID();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        CompactionManager.instance.performAnticompaction(cfs, Arrays.asList(range), refs, txn, 12345, NO_PENDING_REPAIR, parentRepairSession);
    }
    long sum = 0;
    long rows = 0;
    for (SSTableReader x : cfs.getLiveSSTables()) {
        sum += x.bytesOnDisk();
        rows += x.getTotalRows();
    }
    assertEquals(sum, cfs.metric.liveDiskSpaceUsed.getCount());
    // See writeFile for how this number is derived
    assertEquals(rows, 1000 * (1000 * 5));
}
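performAnticompaction splits each sstable that intersects the supplied range into a repaired part (stamped here with repairedAt = 12345) and an unrepaired remainder. A hedged sketch of a follow-up assertion one could add (not part of the test, and the metadata accessors may vary slightly across Cassandra versions):

// Sketch: check that the anticompaction produced both repaired and unrepaired sstables.
for (SSTableReader x : cfs.getLiveSSTables()) {
    if (x.isRepaired())
        assertEquals(12345, x.getSSTableMetadata().repairedAt);
    else
        assertEquals(ActiveRepairService.UNREPAIRED_SSTABLE, x.getSSTableMetadata().repairedAt);
}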