use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class AntiCompactionTest method antiCompactOne.
private void antiCompactOne(long repairedAt, UUID pendingRepair) throws Exception {
    assert repairedAt != UNREPAIRED_SSTABLE || pendingRepair != null;
    ColumnFamilyStore store = prepareColumnFamilyStore();
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    Range<Token> range = new Range<Token>(new BytesToken("0".getBytes()), new BytesToken("4".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    int repairedKeys = 0;
    int pendingKeys = 0;
    int nonRepairedKeys = 0;
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        if (txn == null)
            throw new IllegalStateException();
        UUID parentRepairSession = UUID.randomUUID();
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, pendingRepair, parentRepairSession);
    }
    // anticompaction splits the data at the range boundary, so the store should now hold two sstables:
    // one repaired/pending sstable covering the range, and one unrepaired sstable for everything else
    assertEquals(2, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        try (ISSTableScanner scanner = sstable.getScanner()) {
            while (scanner.hasNext()) {
                UnfilteredRowIterator row = scanner.next();
                if (sstable.isRepaired() || sstable.isPendingRepair()) {
                    // keys in the repaired/pending sstable must fall inside the anticompacted range
                    assertTrue(range.contains(row.partitionKey().getToken()));
                    repairedKeys += sstable.isRepaired() ? 1 : 0;
                    pendingKeys += sstable.isPendingRepair() ? 1 : 0;
                } else {
                    assertFalse(range.contains(row.partitionKey().getToken()));
                    nonRepairedKeys++;
                }
            }
        }
    }
    // the transaction is closed, so nothing should still be marked compacting or hold extra references
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertFalse(sstable.isMarkedCompacted());
        assertEquals(1, sstable.selfRef().globalCount());
    }
    assertEquals(0, store.getTracker().getCompacting().size());
    assertEquals(repairedKeys, repairedAt != UNREPAIRED_SSTABLE ? 4 : 0);
    assertEquals(pendingKeys, pendingRepair != NO_PENDING_REPAIR ? 4 : 0);
    assertEquals(nonRepairedKeys, 6);
}
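The invariant being checked is easiest to see in isolation. Below is a minimal, self-contained sketch (plain Java, no Cassandra classes) of how the anticompacted range partitions the keys. It assumes ten keys "0".."9", which the expected counts of 4 and 6 imply, and that Range is start-exclusive, end-inclusive as in Cassandra; the byte-wise token comparison is approximated with String.compareTo.

import java.util.ArrayList;
import java.util.List;

// Sketch: split keys into "in range" (would become repaired/pending after
// anticompaction) and "out of range" (stays unrepaired).
public class AntiCompactionSketch {
    public static void main(String[] args) {
        List<String> inRange = new ArrayList<>();
        List<String> outOfRange = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            String key = String.valueOf(i);
            // Cassandra's Range is (start, end]: exclusive start, inclusive end
            if (key.compareTo("0") > 0 && key.compareTo("4") <= 0)
                inRange.add(key);
            else
                outOfRange.add(key);
        }
        System.out.println("repaired/pending keys: " + inRange);  // [1, 2, 3, 4]
        System.out.println("unrepaired keys: " + outOfRange);     // [0, 5, 6, 7, 8, 9]
    }
}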
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class BlacklistingCompactionsTest method testBlacklisting.
private void testBlacklisting(String tableName) throws Exception {
    // this test writes enough rows to force multiple block indexes to be used
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(tableName);
    final int ROWS_PER_SSTABLE = 10;
    final int SSTABLES = cfs.metadata().params.minIndexInterval * 2 / ROWS_PER_SSTABLE;
    final int SSTABLES_TO_CORRUPT = 8;
    assertTrue(String.format("Not enough sstables (%d), expected at least %d sstables to corrupt", SSTABLES, SSTABLES_TO_CORRUPT),
               SSTABLES > SSTABLES_TO_CORRUPT);
    // disable compaction while flushing
    cfs.disableAutoCompaction();
    // create a few new sstables whose indexes we can then corrupt
    long maxTimestampExpected = Long.MIN_VALUE;
    Set<DecoratedKey> inserted = new HashSet<>();
    for (int j = 0; j < SSTABLES; j++) {
        for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
            DecoratedKey key = Util.dk(String.valueOf(i));
            long timestamp = j * ROWS_PER_SSTABLE + i;
            new RowUpdateBuilder(cfs.metadata(), timestamp, key.getKey())
                .clustering(Long.valueOf(i))
                .add("val", Long.valueOf(i))
                .build()
                .applyUnsafe();
            maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);
            inserted.add(key);
        }
        cfs.forceBlockingFlush();
        CompactionsTest.assertMaxTimestamp(cfs, maxTimestampExpected);
        assertEquals(inserted.toString(), inserted.size(), Util.getAll(Util.cmd(cfs).build()).size());
    }
    Collection<SSTableReader> sstables = cfs.getLiveSSTables();
    int currentSSTable = 0;
    // corrupt the first SSTABLES_TO_CORRUPT sstables
    for (SSTableReader sstable : sstables) {
        if (currentSSTable + 1 > SSTABLES_TO_CORRUPT)
            break;
        RandomAccessFile raf = null;
        try {
            int corruptionSize = 100;
            raf = new RandomAccessFile(sstable.getFilename(), "rw");
            assertNotNull(raf);
            assertTrue(raf.length() > corruptionSize);
            long pos = random.nextInt((int) (raf.length() - corruptionSize));
            logger.info("Corrupting sstable {} [{}] at pos {} / {}", currentSSTable, sstable.getFilename(), pos, raf.length());
            raf.seek(pos);
            // write something large enough that the corruption cannot go undetected
            // (even without compression)
            byte[] corruption = new byte[corruptionSize];
            random.nextBytes(corruption);
            raf.write(corruption);
            if (ChunkCache.instance != null)
                ChunkCache.instance.invalidateFile(sstable.getFilename());
        } finally {
            FileUtils.closeQuietly(raf);
        }
        currentSSTable++;
    }
    int failures = 0;
    // bound the loop by the sstable count rather than looping forever with for (;;),
    // in case something goes wrong
    for (int i = 0; i < sstables.size(); i++) {
        try {
            cfs.forceMajorCompaction();
        } catch (Exception e) {
            // kind of a hack, since we're not checking for CorruptSSTableException specifically
            // (or, what we actually expect, an ExecutionException wrapping one). This is probably
            // good enough, though: other compaction errors would surface in the other tests.
            failures++;
            continue;
        }
        break;
    }
    cfs.truncateBlocking();
    assertEquals(SSTABLES_TO_CORRUPT, failures);
}
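The corruption step itself is independent of Cassandra and can be tried on its own. A minimal sketch of the same RandomAccessFile technique, using a hypothetical temp file in place of a real sstable:

import java.io.File;
import java.io.FileWriter;
import java.io.RandomAccessFile;
import java.util.Random;

// Sketch: overwrite a random window of a file with random bytes, the way the
// test corrupts sstables. The file name and sizes here are illustrative.
public class CorruptFileSketch {
    public static void main(String[] args) throws Exception {
        File f = File.createTempFile("sstable-sketch", ".db");
        f.deleteOnExit();
        try (FileWriter w = new FileWriter(f)) {
            for (int i = 0; i < 1000; i++)
                w.write("0123456789");
        }
        Random random = new Random();
        int corruptionSize = 100;
        try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
            long pos = random.nextInt((int) (raf.length() - corruptionSize));
            raf.seek(pos);
            byte[] corruption = new byte[corruptionSize];
            random.nextBytes(corruption);
            raf.write(corruption);  // large enough that a checksum check will catch it
        }
    }
}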
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class CompactionAwareWriterTest method testSplittingSizeTieredCompactionWriter.
@Test
public void testSplittingSizeTieredCompactionWriter() throws Throwable {
    ColumnFamilyStore cfs = getColumnFamilyStore();
    cfs.disableAutoCompaction();
    int rowCount = 10000;
    populate(rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    CompactionAwareWriter writer = new SplittingSizeTieredCompactionWriter(cfs, cfs.getDirectories(), txn, txn.originals(), 0);
    int rows = compact(cfs, txn, writer);
    long expectedSize = beforeSize / 2;
    List<SSTableReader> sortedSSTables = new ArrayList<>(cfs.getLiveSSTables());
    // sort the resulting sstables largest-first; the splitting writer should have
    // produced files that halve in size
    Collections.sort(sortedSSTables, new Comparator<SSTableReader>() {
        @Override
        public int compare(SSTableReader o1, SSTableReader o2) {
            return Longs.compare(o2.onDiskLength(), o1.onDiskLength());
        }
    });
    for (SSTableReader sstable : sortedSSTables) {
        // we don't create files smaller than this; everything remaining ends up in the last file
        if (expectedSize > SplittingSizeTieredCompactionWriter.DEFAULT_SMALLEST_SSTABLE_BYTES)
            // allow 1% diff in estimated vs actual size
            assertEquals(expectedSize, sstable.onDiskLength(), expectedSize / 100);
        expectedSize /= 2;
    }
    assertEquals(rowCount, rows);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
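The halving expectation in the loop above can be sketched as a standalone calculation. This illustrates the size schedule the test encodes, not the writer's actual implementation; the 50 MB floor is an assumption standing in for DEFAULT_SMALLEST_SSTABLE_BYTES.

import java.util.ArrayList;
import java.util.List;

// Sketch: the splitting writer is expected to emit sstables of size/2, size/4,
// and so on, until the next split would fall below a smallest-file floor, at
// which point everything remaining goes into one final file.
public class SplitSizesSketch {
    static final long SMALLEST_SSTABLE_BYTES = 50L * 1024 * 1024;  // assumed floor

    static List<Long> expectedSizes(long totalBytes) {
        List<Long> sizes = new ArrayList<>();
        long remaining = totalBytes;
        while (remaining / 2 > SMALLEST_SSTABLE_BYTES) {
            sizes.add(remaining / 2);
            remaining -= remaining / 2;
        }
        sizes.add(remaining);  // the remainder lands in the last sstable
        return sizes;
    }

    public static void main(String[] args) {
        // e.g. a 1 GiB input: 512 MiB, 256 MiB, 128 MiB, 64 MiB, then the rest
        System.out.println(expectedSizes(1024L * 1024 * 1024));
    }
}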
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class CompactionManagerGetSSTablesForValidationTest method legacyIncrementalRepair.
@Test
public void legacyIncrementalRepair() throws Exception {
    makeSSTables();
    registerRepair(true);
    modifySSTables();
    // get sstables for repair
    Validator validator = new Validator(desc, coordinator, FBUtilities.nowInSeconds(), false);
    Set<SSTableReader> sstables = Sets.newHashSet(CompactionManager.instance.getSSTablesToValidate(cfs, validator));
    Assert.assertNotNull(sstables);
    Assert.assertEquals(2, sstables.size());
    Assert.assertTrue(sstables.contains(pendingRepair));
    Assert.assertTrue(sstables.contains(unrepaired));
}
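A toy version of the selection rule this asserts: validation for a legacy incremental repair picks up pending-repair and unrepaired sstables while skipping already-repaired ones. The enum below is an illustrative stand-in for SSTableReader's repair state, and the rule is inferred from the assertions above rather than from getSSTablesToValidate itself.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch: filter live sstables down to the ones a legacy incremental repair
// would validate (everything not already repaired).
public class ValidationSelectionSketch {
    enum RepairState { REPAIRED, PENDING_REPAIR, UNREPAIRED }

    static List<RepairState> sstablesToValidate(List<RepairState> live) {
        List<RepairState> result = new ArrayList<>();
        for (RepairState s : live)
            if (s != RepairState.REPAIRED)
                result.add(s);
        return result;
    }

    public static void main(String[] args) {
        List<RepairState> live = Arrays.asList(
            RepairState.REPAIRED, RepairState.PENDING_REPAIR, RepairState.UNREPAIRED);
        System.out.println(sstablesToValidate(live));  // [PENDING_REPAIR, UNREPAIRED]
    }
}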
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class SSTableMetadataTest method testMaxMinComposites.
@Test
public void testMaxMinComposites() throws CharacterCodingException, ExecutionException, InterruptedException {
    /*
     * Creates two sstables with columns laid out like this:
     * ---------------------
     * k  |a0:9|a1:8|..|a9:0
     * ---------------------
     * and
     * ---------------------
     * k2 |b0:9|b1:8|..|b9:0
     * ---------------------
     * meaning the max clustering components are b9 and 9, and the min are a0 and 0
     */
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("StandardComposite2");
    for (int i = 0; i < 10; i++) {
        new RowUpdateBuilder(cfs.metadata(), 0, "k").clustering("a" + (9 - i), getBytes(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
    for (int i = 0; i < 10; i++) {
        new RowUpdateBuilder(cfs.metadata(), 0, "k2").clustering("b" + (9 - i), getBytes(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    assertEquals(cfs.getLiveSSTables().size(), 1);
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals("b9", ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)));
        assertEquals(9, ByteBufferUtil.toInt(sstable.getSSTableMetadata().maxClusteringValues.get(1)));
        assertEquals("a0", ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)));
        assertEquals(0, ByteBufferUtil.toInt(sstable.getSSTableMetadata().minClusteringValues.get(1)));
    }
}
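Note that the asserted max (b9, 9) is not the clustering of any single row (b9 was written with value 0): min and max are tracked per component, independently. A small, self-contained sketch of that bookkeeping, with strings and ints standing in for the serialized ByteBuffers:

import java.util.Arrays;
import java.util.List;

// Sketch: track per-component min/max clustering values across all rows, the
// way the sstable metadata arrives at max = (b9, 9) and min = (a0, 0) above.
public class MinMaxClusteringSketch {
    public static void main(String[] args) {
        String minName = null, maxName = null;
        Integer minVal = null, maxVal = null;
        List<String> prefixes = Arrays.asList("a", "b");
        for (String p : prefixes) {
            for (int i = 0; i < 10; i++) {
                String name = p + (9 - i);  // same pattern as the test's clusterings
                if (minName == null || name.compareTo(minName) < 0) minName = name;
                if (maxName == null || name.compareTo(maxName) > 0) maxName = name;
                if (minVal == null || i < minVal) minVal = i;
                if (maxVal == null || i > maxVal) maxVal = i;
            }
        }
        // prints: min = (a0, 0), max = (b9, 9)
        System.out.printf("min = (%s, %d), max = (%s, %d)%n", minName, minVal, maxName, maxVal);
    }
}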