Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class ColumnFamilyStoreTest, method testScrubDataDirectories.
@Test
public void testScrubDataDirectories() throws Throwable {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    ColumnFamilyStore.scrubDataDirectories(cfs.metadata());
    new RowUpdateBuilder(cfs.metadata(), 2, "key").clustering("name").add("val", "2").build().applyUnsafe();
    cfs.forceBlockingFlush();
    // Rename the data file to a tmp name so scrubDataDirectories sees an incomplete sstable to clean up
    Collection<SSTableReader> ssTables = cfs.getLiveSSTables();
    assertEquals(1, ssTables.size());
    SSTableReader ssTable = ssTables.iterator().next();
    String dataFileName = ssTable.descriptor.filenameFor(Component.DATA);
    String tmpDataFileName = ssTable.descriptor.tmpFilenameFor(Component.DATA);
    new File(dataFileName).renameTo(new File(tmpDataFileName));
    ssTable.selfRef().release();
    ColumnFamilyStore.scrubDataDirectories(cfs.metadata());
    List<File> ssTableFiles = new Directories(cfs.metadata()).sstableLister(Directories.OnTxnErr.THROW).listFiles();
    assertNotNull(ssTableFiles);
    assertEquals(0, ssTableFiles.size());
}
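Several of the examples below repeat the same flush-then-inspect step seen here. As a minimal sketch (the helper name flushAndGetOnlySSTable is hypothetical, and it assumes the store holds exactly one live sstable after the flush), the pattern could be factored out like this:

static SSTableReader flushAndGetOnlySSTable(ColumnFamilyStore cfs) {
    cfs.forceBlockingFlush();
    Collection<SSTableReader> live = cfs.getLiveSSTables();
    // The caller is responsible for ensuring only one sstable can exist here.
    assertEquals(1, live.size());
    return live.iterator().next();
}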
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class AbstractPendingRepairTest, method makeSSTable.
/**
 * Creates and returns an sstable.
 *
 * @param orphan if true, the sstable will be removed from the unrepaired strategy
 */
SSTableReader makeSSTable(boolean orphan) {
    int pk = nextSSTableKey++;
    Set<SSTableReader> pre = cfs.getLiveSSTables();
    QueryProcessor.executeInternal(String.format("INSERT INTO %s.%s (k, v) VALUES(?, ?)", ks, tbl), pk, pk);
    cfs.forceBlockingFlush();
    Set<SSTableReader> post = cfs.getLiveSSTables();
    Set<SSTableReader> diff = new HashSet<>(post);
    diff.removeAll(pre);
    assert diff.size() == 1;
    SSTableReader sstable = diff.iterator().next();
    if (orphan) {
        // Check that some unrepaired strategy actually tracks the sstable before detaching it;
        // without the assert, the Iterables.any result would be silently discarded.
        assert Iterables.any(csm.getUnrepaired(), s -> s.getSSTables().contains(sstable));
        csm.getUnrepaired().forEach(s -> s.removeSSTable(sstable));
    }
    return sstable;
}
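The pre/post set difference above is how the test pins down the one sstable the flush produced. A hypothetical call site, assuming the csm fixture field from this class, might use it as follows:

// Create an sstable detached from the unrepaired strategies, then check
// that no unrepaired strategy still tracks it.
SSTableReader orphaned = makeSSTable(true);
assert !Iterables.any(csm.getUnrepaired(), s -> s.getSSTables().contains(orphaned));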
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class AntiCompactionTest, method antiCompactOne.
private void antiCompactOne(long repairedAt, UUID pendingRepair) throws Exception {
    assert repairedAt != UNREPAIRED_SSTABLE || pendingRepair != null;
    ColumnFamilyStore store = prepareColumnFamilyStore();
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    Range<Token> range = new Range<Token>(new BytesToken("0".getBytes()), new BytesToken("4".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    int repairedKeys = 0;
    int pendingKeys = 0;
    int nonRepairedKeys = 0;
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        if (txn == null)
            throw new IllegalStateException();
        UUID parentRepairSession = UUID.randomUUID();
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, pendingRepair, parentRepairSession);
    }
    assertEquals(2, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        try (ISSTableScanner scanner = sstable.getScanner()) {
            while (scanner.hasNext()) {
                UnfilteredRowIterator row = scanner.next();
                if (sstable.isRepaired() || sstable.isPendingRepair()) {
                    assertTrue(range.contains(row.partitionKey().getToken()));
                    repairedKeys += sstable.isRepaired() ? 1 : 0;
                    pendingKeys += sstable.isPendingRepair() ? 1 : 0;
                } else {
                    assertFalse(range.contains(row.partitionKey().getToken()));
                    nonRepairedKeys++;
                }
            }
        }
    }
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertFalse(sstable.isMarkedCompacted());
        assertEquals(1, sstable.selfRef().globalCount());
    }
    assertEquals(0, store.getTracker().getCompacting().size());
    assertEquals(repairedAt != UNREPAIRED_SSTABLE ? 4 : 0, repairedKeys);
    assertEquals(pendingRepair != NO_PENDING_REPAIR ? 4 : 0, pendingKeys);
    assertEquals(6, nonRepairedKeys);
}
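Given the guard assert at the top, this helper is always invoked in one of two modes: with a real repairedAt timestamp, or with a pending repair session. Hedged example invocations (the argument values are illustrative, not taken from the project):

// Mode 1: mark the anticompacted range as repaired at a fixed timestamp.
antiCompactOne(1000, NO_PENDING_REPAIR);
// Mode 2: leave repairedAt unset and attach a pending repair session instead.
antiCompactOne(UNREPAIRED_SSTABLE, UUID.randomUUID());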
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class AntiCompactionTest, method shouldSkipAntiCompactionForNonIntersectingRange.
@Test
public void shouldSkipAntiCompactionForNonIntersectingRange() throws InterruptedException, IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
    store.disableAutoCompaction();
    for (int table = 0; table < 10; table++) {
        generateSStable(store, Integer.toString(table));
    }
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    Range<Token> range = new Range<Token>(new BytesToken("-1".getBytes()), new BytesToken("-10".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    UUID parentRepairSession = UUID.randomUUID();
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, 1, NO_PENDING_REPAIR, parentRepairSession);
    }
    assertThat(store.getLiveSSTables().size(), is(10));
    assertThat(Iterables.get(store.getLiveSSTables(), 0).isRepaired(), is(false));
}
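The range from "-1" to "-10" is deliberately chosen so it intersects none of the ten generated sstables, which is why all ten survive untouched and unrepaired. A small hedged sanity check of that premise (a sketch assuming org.apache.cassandra.dht.Bounds and the public first/last keys on SSTableReader; not part of the original test):

// Sketch: verify the test range really misses every sstable's token span
// before expecting anticompaction to skip them all.
for (SSTableReader sstable : store.getLiveSSTables()) {
    Bounds<Token> span = new Bounds<>(sstable.first.getToken(), sstable.last.getToken());
    assertFalse(range.intersects(span));
}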
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class BlacklistingCompactionsTest, method testBlacklisting.
private void testBlacklisting(String tableName) throws Exception {
    // this test writes enough rows to force multiple block indexes to be used
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(tableName);
    final int ROWS_PER_SSTABLE = 10;
    final int SSTABLES = cfs.metadata().params.minIndexInterval * 2 / ROWS_PER_SSTABLE;
    final int SSTABLES_TO_CORRUPT = 8;
    assertTrue(String.format("Not enough sstables (%d), expected at least %d sstables to corrupt", SSTABLES, SSTABLES_TO_CORRUPT), SSTABLES > SSTABLES_TO_CORRUPT);
    // disable compaction while flushing
    cfs.disableAutoCompaction();
    // test index corruption: first create a few new sstables
    long maxTimestampExpected = Long.MIN_VALUE;
    Set<DecoratedKey> inserted = new HashSet<>();
    for (int j = 0; j < SSTABLES; j++) {
        for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
            DecoratedKey key = Util.dk(String.valueOf(i));
            long timestamp = j * ROWS_PER_SSTABLE + i;
            new RowUpdateBuilder(cfs.metadata(), timestamp, key.getKey()).clustering(Long.valueOf(i)).add("val", Long.valueOf(i)).build().applyUnsafe();
            maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);
            inserted.add(key);
        }
        cfs.forceBlockingFlush();
        CompactionsTest.assertMaxTimestamp(cfs, maxTimestampExpected);
        assertEquals(inserted.toString(), inserted.size(), Util.getAll(Util.cmd(cfs).build()).size());
    }
    Collection<SSTableReader> sstables = cfs.getLiveSSTables();
    int currentSSTable = 0;
    // corrupt the first SSTABLES_TO_CORRUPT sstables
    for (SSTableReader sstable : sstables) {
        if (currentSSTable + 1 > SSTABLES_TO_CORRUPT)
            break;
        RandomAccessFile raf = null;
        try {
            int corruptionSize = 100;
            raf = new RandomAccessFile(sstable.getFilename(), "rw");
            assertNotNull(raf);
            assertTrue(raf.length() > corruptionSize);
            long pos = random.nextInt((int) (raf.length() - corruptionSize));
            logger.info("Corrupting sstable {} [{}] at pos {} / {}", currentSSTable, sstable.getFilename(), pos, raf.length());
            raf.seek(pos);
            // Write something large enough that the corruption cannot go undetected
            // (even without compression)
            byte[] corruption = new byte[corruptionSize];
            random.nextBytes(corruption);
            raf.write(corruption);
            if (ChunkCache.instance != null)
                ChunkCache.instance.invalidateFile(sstable.getFilename());
        } finally {
            FileUtils.closeQuietly(raf);
        }
        currentSSTable++;
    }
    int failures = 0;
    // bound the loop so an unexpected failure mode can't make it spin forever, as a bare for (;;) could
    for (int i = 0; i < sstables.size(); i++) {
        try {
            cfs.forceMajorCompaction();
        } catch (Exception e) {
            // kind of a hack, since we're not checking for CorruptSSTableException specifically (or, what we
            // actually expect, an ExecutionException wrapping one). This is probably good enough, though: if
            // compaction failed for some other reason, the other tests would presumably bring that to light.
            failures++;
            continue;
        }
        break;
    }
    cfs.truncateBlocking();
    assertEquals(SSTABLES_TO_CORRUPT, failures);
}
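The corruption loop in the middle of this test is self-contained enough to extract. A hedged sketch of such a helper (the name corruptSSTable is hypothetical; it assumes the same java.io.RandomAccessFile approach and ChunkCache invalidation used above):

// Hypothetical helper: overwrite corruptionSize random bytes at a random
// offset in the sstable's data file, then drop any cached chunks so the
// corruption is visible to subsequent reads.
private static void corruptSSTable(SSTableReader sstable, Random random, int corruptionSize) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(sstable.getFilename(), "rw")) {
        long pos = random.nextInt((int) (raf.length() - corruptionSize));
        raf.seek(pos);
        byte[] corruption = new byte[corruptionSize];
        random.nextBytes(corruption);
        raf.write(corruption);
    }
    if (ChunkCache.instance != null)
        ChunkCache.instance.invalidateFile(sstable.getFilename());
}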