Usage example of org.apache.cassandra.io.sstable.format.SSTableWriter in the Apache Cassandra project: class SSTableWriterTest, method testValueTooBigCorruption.
@Test
public void testValueTooBigCorruption() throws InterruptedException {
    // Reading back a value larger than the table's configured max value size
    // must surface a CorruptSSTableException rather than returning bad data.
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_SMALL_MAX_VALUE);
    truncate(cfs);
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    try (SSTableWriter writer1 = getWriter(cfs, dir, txn)) {
        // Append a single row whose 2 MiB value exceeds the table's small max-value limit.
        UpdateBuilder largeValue = UpdateBuilder.create(cfs.metadata(), "large_value").withTimestamp(1);
        largeValue.newRow("clustering").add("val", ByteBuffer.allocate(2 * 1024 * 1024));
        writer1.append(largeValue.build().unfilteredIterator());
        SSTableReader sstable = writer1.finish(true);
        txn.update(sstable, false);
        DecoratedKey dk = Util.dk("large_value");
        // FIX: the row iterator is a resource; close it via try-with-resources
        // instead of leaking it on both the success and the exception path.
        try (UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata()), false)) {
            while (rowIter.hasNext()) {
                rowIter.next();
                // no-op read, as values may not appear expected
            }
            fail("Expected a CorruptSSTableException to be thrown");
        } catch (CorruptSSTableException ignored) {
            // expected: the oversized value must be detected as corruption
        }
        txn.abort();
        LifecycleTransaction.waitForDeletions();
    }
}
Usage example of org.apache.cassandra.io.sstable.format.SSTableWriter in the Apache Cassandra project: class SSTableWriterTest, method testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables.
@Test
public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws InterruptedException {
// Aborting an offline STREAM transaction must delete the sstables of both a
// writer that was finished (and registered with the txn) and one still open.
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
truncate(cfs);
File dir = cfs.getDirectories().getDirectoryForNewSSTables();
LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
SSTableWriter writer1 = getWriter(cfs, dir, txn);
SSTableWriter writer2 = getWriter(cfs, dir, txn);
try {
// Write 10k partitions of 100 rows each through the first writer.
for (int i = 0; i < 10000; i++) {
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
for (int j = 0; j < 100; j++) builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
writer1.append(builder.build().unfilteredIterator());
}
assertFileCounts(dir.list());
// Write a disjoint key range through the second writer, which stays open.
for (int i = 10000; i < 20000; i++) {
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
for (int j = 0; j < 100; j++) builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
writer2.append(builder.build().unfilteredIterator());
}
// Finish writer1 and register the resulting sstable with the transaction.
SSTableReader sstable = writer1.finish(true);
txn.update(sstable, false);
assertFileCounts(dir.list());
int datafiles = assertFileCounts(dir.list());
// One data file per writer: the finished one plus the still-open one.
assertEquals(datafiles, 2);
// Files remain on disk until .abort() is called on the transaction.
if (!FBUtilities.isWindows) {
LifecycleTransaction.waitForDeletions();
assertFileCounts(dir.list());
}
// Abort must remove both the finished and the in-progress sstables.
txn.abort();
LifecycleTransaction.waitForDeletions();
datafiles = assertFileCounts(dir.list());
assertEquals(datafiles, 0);
validateCFS(cfs);
} finally {
// Always release writer resources, even if an assertion above failed.
writer1.close();
writer2.close();
}
}
Usage example of org.apache.cassandra.io.sstable.format.SSTableWriter in the Apache Cassandra project: class SSTableWriterTest, method testAbortTxnWithOpenEarlyShouldRemoveSSTable.
@Test
public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedException {
// Aborting the transaction must remove the sstable even when early-open
// readers were obtained from the in-progress writer along the way.
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
truncate(cfs);
File dir = cfs.getDirectories().getDirectoryForNewSSTables();
LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
try (SSTableWriter writer = getWriter(cfs, dir, txn)) {
// First batch: 10k partitions of 100 rows each.
for (int i = 0; i < 10000; i++) {
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
for (int j = 0; j < 100; j++) builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
writer.append(builder.build().unfilteredIterator());
}
// Open the partially written sstable early; this must yield a reader.
SSTableReader s = writer.setMaxDataAge(1000).openEarly();
assert s != null;
assertFileCounts(dir.list());
// Second batch extends the data past the first early-open snapshot.
for (int i = 10000; i < 20000; i++) {
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1);
for (int j = 0; j < 100; j++) builder.newRow("" + j).add("val", ByteBuffer.allocate(1000));
writer.append(builder.build().unfilteredIterator());
}
SSTableReader s2 = writer.setMaxDataAge(1000).openEarly();
// The later snapshot must cover a strictly larger last key.
assertTrue(s.last.compareTo(s2.last) < 0);
assertFileCounts(dir.list());
// Release both early-open readers before aborting the writer/txn.
s.selfRef().release();
s2.selfRef().release();
int datafiles = assertFileCounts(dir.list());
assertEquals(datafiles, 1);
// The data file stays on disk until .abort() is called.
if (!FBUtilities.isWindows) {
LifecycleTransaction.waitForDeletions();
assertFileCounts(dir.list());
}
writer.abort();
txn.abort();
LifecycleTransaction.waitForDeletions();
// After abort, no data files may remain.
datafiles = assertFileCounts(dir.list());
assertEquals(datafiles, 0);
validateCFS(cfs);
}
}
Usage example of org.apache.cassandra.io.sstable.format.SSTableWriter in the Apache Cassandra project: class MaxSSTableSizeWriter, method switchCompactionLocation.
@Override
public void switchCompactionLocation(Directories.DataDirectory location) {
    // Remember the new target directory, then point the wrapped writer at a
    // fresh sstable created in that location.
    sstableDirectory = location;
    @SuppressWarnings("resource")
    SSTableWriter newWriter = SSTableWriter.create(
            cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(sstableDirectory)),
            estimatedTotalKeys / estimatedSSTables,
            minRepairedAt,
            pendingRepair,
            cfs.metadata,
            new MetadataCollector(allSSTables, cfs.metadata().comparator, level),
            SerializationHeader.make(cfs.metadata(), nonExpiredSSTables),
            cfs.indexManager.listIndexes(),
            txn);
    // Ownership of newWriter passes to the switching writer, hence the
    // suppressed resource warning above.
    sstableWriter.switchWriter(newWriter);
}
End of aggregated SSTableWriter usage examples.