Example usage of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project: class TTLExpiryTest, method testCheckForExpiredSSTableBlockers.
/**
 * Verifies that SSTableExpiredBlockers reports exactly one blocking sstable:
 * the sstable holding live data must block the ten tombstone-only sstables
 * (which, with gc_grace_seconds = 0, are all fully expired).
 */
@Test
public void testCheckForExpiredSSTableBlockers() throws InterruptedException {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build(), true);

    // One live cell -> this sstable should block the expired ones from being dropped.
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), "test")
        .noRowMarker()
        .add("col1", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    cfs.forceBlockingFlush();
    SSTableReader blocker = cfs.getSSTables(SSTableSet.LIVE).iterator().next();

    // Flush ten sstables that contain only a tombstone for the same cell.
    int expiredCount = 10;
    for (int i = 0; i < expiredCount; i++) {
        new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), "test")
            .noRowMarker()
            .delete("col1")
            .build()
            .applyUnsafe();
        cfs.forceBlockingFlush();
    }

    // gcBefore is 100s in the future, so every tombstone is gc-able.
    int gcBefore = (int) (System.currentTimeMillis() / 1000) + 100;
    Multimap<SSTableReader, SSTableReader> blockers =
        SSTableExpiredBlockers.checkForExpiredSSTableBlockers(cfs.getSSTables(SSTableSet.LIVE), gcBefore);

    assertEquals(1, blockers.keySet().size());
    assertTrue(blockers.keySet().contains(blocker));
    assertEquals(expiredCount, blockers.get(blocker).size());
}
Example usage of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project: class CustomCassandraIndex, method buildBlocking.
// Builds this index from all existing base-table data, blocking until the
// build completes, then flushes the index table and marks the index built.
private void buildBlocking() {
// Flush first so all base-table data is in sstables visible to the build.
baseCfs.forceBlockingFlush();
// Take references on the canonical sstable set for the duration of the
// build; the try-with-resources releases them on exit (normal or early return).
try (ColumnFamilyStore.RefViewFragment viewFragment = baseCfs.selectAndReference(View.selectFunction(SSTableSet.CANONICAL));
Refs<SSTableReader> sstables = viewFragment.refs) {
if (sstables.isEmpty()) {
// No data to index: still mark the index built so it becomes queryable.
logger.info("No SSTable data for {}.{} to build index {} from, marking empty index as built", baseCfs.metadata.keyspace, baseCfs.metadata.name, metadata.name);
baseCfs.indexManager.markIndexBuilt(metadata.name);
return;
}
logger.info("Submitting index build of {} for data in {}", metadata.name, getSSTableNames(sstables));
// Iterate keys across all referenced sstables and index them via the
// compaction manager, waiting synchronously for the build to finish.
SecondaryIndexBuilder builder = new CollatedViewIndexBuilder(baseCfs, Collections.singleton(this), new ReducingKeyIterator(sstables));
Future<?> future = CompactionManager.instance.submitIndexBuild(builder);
FBUtilities.waitOnFuture(future);
// Persist the freshly built index data before declaring the index built.
indexCfs.forceBlockingFlush();
baseCfs.indexManager.markIndexBuilt(metadata.name);
}
logger.info("Index build of {} complete", metadata.name);
}
Example usage of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project: class SSTableWriterTest, method testAbortTxnWithClosedWriterShouldRemoveSSTable.
/**
 * Verifies that aborting a transaction after the writer has been finished
 * (and the resulting sstable's self-ref released) still removes the sstable
 * data files from disk.
 *
 * @throws InterruptedException if interrupted while waiting for deletions
 */
@Test
public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);

    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    try (SSTableWriter writer = getWriter(cfs, dir, txn)) {
        appendPartitions(writer, cfs, 0, 10000);
        assertFileCounts(dir.list());
        appendPartitions(writer, cfs, 10000, 20000);

        SSTableReader sstable = writer.finish(true);
        int datafiles = assertFileCounts(dir.list());
        // JUnit convention: expected value first.
        assertEquals(1, datafiles);

        sstable.selfRef().release();
        // The data file should still exist here even though the ref was
        // released: the transaction keeps it alive until .abort() is called.
        if (!FBUtilities.isWindows) {
            LifecycleTransaction.waitForDeletions();
            assertFileCounts(dir.list());
        }

        txn.abort();
        LifecycleTransaction.waitForDeletions();
        datafiles = assertFileCounts(dir.list());
        assertEquals(0, datafiles);
        validateCFS(cfs);
    }
}

/**
 * Appends one partition per key in [firstKey, lastKeyExclusive), each with
 * 100 rows carrying a 1000-byte value. Extracted to remove the duplicated
 * loop that previously appeared twice in the test above.
 */
private static void appendPartitions(SSTableWriter writer, ColumnFamilyStore cfs, int firstKey, int lastKeyExclusive) {
    for (int i = firstKey; i < lastKeyExclusive; i++) {
        UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
        for (int j = 0; j < 100; j++)
            builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
        writer.append(builder.build().unfilteredIterator());
    }
}
Example usage of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project: class SSTableWriterTest, method testValueTooBigCorruption.
/**
 * Verifies that reading a cell larger than the table's configured maximum
 * value size is detected and surfaced as a CorruptSSTableException instead of
 * silently returning data.
 *
 * @throws InterruptedException if interrupted while waiting for deletions
 */
@Test
public void testValueTooBigCorruption() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_SMALL_MAX_VALUE);
    truncate(cfs);

    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    try (SSTableWriter writer1 = getWriter(cfs, dir, txn)) {
        // Write a 2 MiB value, which exceeds the table's small max value size.
        UpdateBuilder largeValue = UpdateBuilder.create(cfs.metadata(), "large_value").withTimestamp(1);
        largeValue.newRow("clustering").add("val", ByteBuffer.allocate(2 * 1024 * 1024));
        writer1.append(largeValue.build().unfilteredIterator());

        SSTableReader sstable = writer1.finish(true);
        txn.update(sstable, false);

        DecoratedKey dk = Util.dk("large_value");
        // try-with-resources: the row iterator is AutoCloseable and was
        // previously leaked on both the success and the exception path.
        try (UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata()), false)) {
            while (rowIter.hasNext()) {
                rowIter.next();
                // no-op read; materializing the row is what triggers the check
            }
            fail("Expected a CorruptSSTableException to be thrown");
        } catch (CorruptSSTableException e) {
            // expected: the oversized value must be reported as corruption
        }

        txn.abort();
        LifecycleTransaction.waitForDeletions();
    }
}
Example usage of org.apache.cassandra.io.sstable.format.SSTableReader in the Apache Cassandra project: class SSTableWriterTestBase, method validateCFS.
/**
 * Validates a column family store's live-sstable bookkeeping:
 * every live sstable is referenced exactly once and is not marked compacted;
 * every Data file on disk belongs to a live sstable (matched by generation);
 * the live/total disk-space metrics equal the sum of bytes on disk; and
 * nothing is registered as compacting.
 *
 * If any live sstables exist, a maximal compaction task is also submitted to
 * assert that one is available. This is a side effect, so call this method
 * only after all other on-disk checks for the operation under test are done.
 *
 * @param cfs - the column family store to validate
 */
public static void validateCFS(ColumnFamilyStore cfs) {
    long totalBytesOnDisk = 0;
    Set<Integer> liveGenerations = new HashSet<>();
    for (SSTableReader reader : cfs.getLiveSSTables()) {
        assertFalse(reader.isMarkedCompacted());
        assertEquals(1, reader.selfRef().globalCount());
        liveGenerations.add(reader.descriptor.generation);
        totalBytesOnDisk += reader.bytesOnDisk();
    }

    // Every Data component on disk must correspond to a live sstable.
    for (File directory : cfs.getDirectories().getCFDirectories()) {
        for (File file : directory.listFiles()) {
            if (!file.getName().contains("Data"))
                continue;
            Descriptor descriptor = Descriptor.fromFilename(file.getAbsolutePath());
            assertTrue(descriptor.toString(), liveGenerations.contains(descriptor.generation));
        }
    }

    assertEquals(totalBytesOnDisk, cfs.metric.liveDiskSpaceUsed.getCount());
    assertEquals(totalBytesOnDisk, cfs.metric.totalDiskSpaceUsed.getCount());
    assertTrue(cfs.getTracker().getCompacting().isEmpty());

    if (cfs.getLiveSSTables().size() > 0) {
        int gcBefore = cfs.gcBefore((int) (System.currentTimeMillis() / 1000));
        assertFalse(CompactionManager.instance.submitMaximal(cfs, gcBefore, false).isEmpty());
    }
}
Aggregations