Use of org.apache.cassandra.db.compaction.Scrubber in project cassandra by apache.
From class ScrubTest: method testScrubOutOfOrder.
@Test
public void testScrubOutOfOrder() throws Exception {
    // This test assumes ByteOrderedPartitioner to create an out-of-order SSTable
    IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
    DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
    // Create out-of-order SSTable
    File tempDir = File.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
    // create ks/cf directory
    File tempDataDir = new File(tempDir, String.join(File.separator, KEYSPACE, CF3));
    tempDataDir.mkdirs();
    try {
        CompactionManager.instance.disableAutoCompaction();
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        String columnFamily = CF3;
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnFamily);
        cfs.clearUnsafe();
        List<String> keys = Arrays.asList("t", "a", "b", "z", "c", "y", "d");
        Descriptor desc = cfs.newSSTableDescriptor(tempDataDir);
        LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
        try (SSTableTxnWriter writer = new SSTableTxnWriter(txn, createTestWriter(desc, (long) keys.size(), cfs.metadata, txn))) {
            for (String k : keys) {
                PartitionUpdate update = UpdateBuilder.create(cfs.metadata(), Util.dk(k)).newRow("someName").add("val", "someValue").build();
                writer.append(update.unfilteredIterator());
            }
            writer.finish(false);
        }
        try {
            SSTableReader.open(desc, cfs.metadata);
            fail("SSTR validation should have caught the out-of-order rows");
        } catch (IllegalStateException ise) {
            /* this is expected */
        }
        // open without validation for scrubbing
        Set<Component> components = new HashSet<>();
        if (new File(desc.filenameFor(Component.COMPRESSION_INFO)).exists())
            components.add(Component.COMPRESSION_INFO);
        components.add(Component.DATA);
        components.add(Component.PRIMARY_INDEX);
        components.add(Component.FILTER);
        components.add(Component.STATS);
        components.add(Component.SUMMARY);
        components.add(Component.TOC);
        SSTableReader sstable = SSTableReader.openNoValidation(desc, components, cfs);
        // out-of-order data can leave the reader's first/last keys inverted; patch them up
        if (sstable.last.compareTo(sstable.first) < 0)
            sstable.last = sstable.first;
        try (LifecycleTransaction scrubTxn = LifecycleTransaction.offline(OperationType.SCRUB, sstable);
             Scrubber scrubber = new Scrubber(cfs, scrubTxn, false, true)) {
            scrubber.scrub();
        }
        LifecycleTransaction.waitForDeletions();
        cfs.loadNewSSTables();
        assertOrderedAll(cfs, 7);
    } finally {
        FileUtils.deleteRecursive(tempDataDir);
        // reset partitioner
        DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
    }
}
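All three tests verify the post-scrub state through the assertOrderedAll helper, which is defined elsewhere in ScrubTest. A minimal sketch of what it plausibly does, assuming the Util.cmd/Util.getAll test utilities; the real helper may differ in detail:

// Hypothetical sketch of ScrubTest's assertOrderedAll helper; the actual
// implementation may differ. It reads every live partition back and checks
// both the expected count and that partition keys arrive in token order.
private static void assertOrderedAll(ColumnFamilyStore cfs, int expectedSize) {
    List<FilteredPartition> partitions = Util.getAll(Util.cmd(cfs).build());
    assertEquals(expectedSize, partitions.size());
    DecoratedKey previous = null;
    for (FilteredPartition partition : partitions) {
        if (previous != null)
            assertTrue("partitions returned out of order", previous.compareTo(partition.partitionKey()) <= 0);
        previous = partition.partitionKey();
    }
}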
Use of org.apache.cassandra.db.compaction.Scrubber in project cassandra by apache.
From class ScrubTest: method testScrubCorruptedCounterRow.
@Test
public void testScrubCorruptedCounterRow() throws IOException, WriteTimeoutException {
    // When compression is enabled, for testing corrupted chunks we need enough partitions to cover
    // at least 3 chunks of size COMPRESSION_CHUNK_LENGTH
    int numPartitions = 1000;
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    cfs.clearUnsafe();
    fillCounterCF(cfs, numPartitions);
    assertOrderedAll(cfs, numPartitions);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    // make sure to override at most 1 chunk when compression is enabled
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));
    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, false, true)) {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    } catch (IOError err) {
        /* this is expected */
    }
    // with skipCorrupted == true, the corrupt rows will be skipped
    Scrubber.ScrubResult scrubResult;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, true, true)) {
        scrubResult = scrubber.scrubWithResult();
    }
    assertNotNull(scrubResult);
    boolean compression = Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false"));
    if (compression) {
        assertEquals(0, scrubResult.emptyRows);
        assertEquals(numPartitions, scrubResult.badRows + scrubResult.goodRows);
        // because we only corrupted 1 chunk and we chose enough partitions to cover at least 3 chunks
        assertTrue(scrubResult.goodRows >= scrubResult.badRows * 2);
    } else {
        assertEquals(0, scrubResult.emptyRows);
        assertEquals(1, scrubResult.badRows);
        assertEquals(numPartitions - 1, scrubResult.goodRows);
    }
    assertEquals(1, cfs.getLiveSSTables().size());
    assertOrderedAll(cfs, scrubResult.goodRows);
}
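overrideWithGarbage is another ScrubTest helper not shown in this excerpt. A simplified, hypothetical sketch of its uncompressed path, assuming the reader can report each partition's data-file offset via getPosition; the real helper additionally maps the keys to whole compression chunks when compression is enabled:

// Hypothetical simplified sketch of overrideWithGarbage (uncompressed case
// only); the actual helper also corrupts whole compression chunks when
// compression is enabled. It overwrites the data-file bytes between the
// partitions of key1 and key2 with deterministic junk.
private static void overrideWithGarbage(SSTableReader sstable, ByteBuffer key1, ByteBuffer key2) throws IOException {
    long position1 = sstable.getPosition(Util.dk(key1), SSTableReader.Operator.EQ).position;
    long position2 = sstable.getPosition(Util.dk(key2), SSTableReader.Operator.EQ).position;
    long start = Math.min(position1, position2);
    long end = Math.max(position1, position2);
    try (RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw")) {
        file.seek(start);
        byte[] garbage = new byte[(int) (end - start)];
        java.util.Arrays.fill(garbage, (byte) 'z');
        file.write(garbage);
    }
}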
Use of org.apache.cassandra.db.compaction.Scrubber in project cassandra by apache.
From class ScrubTest: method testScrubCorruptedRowInSmallFile.
@Test
public void testScrubCorruptedRowInSmallFile() throws IOException, WriteTimeoutException {
    // cannot test this with compression
    assumeTrue(!Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false")));
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(COUNTER_CF);
    cfs.clearUnsafe();
    fillCounterCF(cfs, 2);
    assertOrderedAll(cfs, 2);
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    // overwrite one row with garbage
    overrideWithGarbage(sstable, ByteBufferUtil.bytes("0"), ByteBufferUtil.bytes("1"));
    // with skipCorrupted == false, the scrub is expected to fail
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, false, true)) {
        scrubber.scrub();
        fail("Expected a CorruptSSTableException to be thrown");
    } catch (IOError err) {
        /* this is expected */
    }
    // with skipCorrupted == true, the corrupt row will be skipped
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(Arrays.asList(sstable), OperationType.SCRUB);
         Scrubber scrubber = new Scrubber(cfs, txn, true, true)) {
        scrubber.scrub();
    }
    assertEquals(1, cfs.getLiveSSTables().size());
    // verify that we can read all of the rows, and there is now one less row
    assertOrderedAll(cfs, 1);
}
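Both counter tests populate the table through fillCounterCF, also defined elsewhere in ScrubTest. A minimal sketch, assuming the UpdateBuilder and CounterMutation APIs used in this era of the codebase; the helper name, row key layout, and the exact flush call are assumptions:

// Hypothetical sketch of fillCounterCF; the actual helper may differ. It
// applies one counter update per partition and flushes so the data lands
// in a single SSTable on disk.
private static void fillCounterCF(ColumnFamilyStore cfs, int numPartitions) throws WriteTimeoutException {
    for (int i = 0; i < numPartitions; i++) {
        PartitionUpdate update = UpdateBuilder.create(cfs.metadata(), String.valueOf(i))
                                              .newRow("r1").add("val", 100L)
                                              .build();
        new CounterMutation(new Mutation(update), ConsistencyLevel.ONE).apply();
    }
    cfs.forceBlockingFlush();
}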