Example usage of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class ScrubTest, method testScrubOutOfOrder.
@Test
// Verifies that Scrubber can repair an sstable whose partitions are out of order:
// it deliberately writes keys in non-sorted order under ByteOrderedPartitioner,
// confirms normal open() rejects the file, then scrubs it and checks all rows survive.
public void testScrubOutOfOrder() throws Exception {
// This test assumes ByteOrderPartitioner to create out-of-order SSTable
IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
// Create out-of-order SSTable
File tempDir = File.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
// create ks/cf directory
File tempDataDir = new File(tempDir, String.join(File.separator, KEYSPACE, CF3));
tempDataDir.mkdirs();
try {
CompactionManager.instance.disableAutoCompaction();
Keyspace keyspace = Keyspace.open(KEYSPACE);
String columnFamily = CF3;
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnFamily);
cfs.clearUnsafe();
// Keys are intentionally NOT in sorted byte order ("t" before "a", etc.)
// so the resulting sstable is out of order under ByteOrderedPartitioner.
List<String> keys = Arrays.asList("t", "a", "b", "z", "c", "y", "d");
Descriptor desc = cfs.newSSTableDescriptor(tempDataDir);
LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
// createTestWriter presumably bypasses the usual key-ordering checks — TODO confirm
try (SSTableTxnWriter writer = new SSTableTxnWriter(txn, createTestWriter(desc, (long) keys.size(), cfs.metadata, txn))) {
for (String k : keys) {
PartitionUpdate update = UpdateBuilder.create(cfs.metadata(), Util.dk(k)).newRow("someName").add("val", "someValue").build();
writer.append(update.unfilteredIterator());
}
// finish(false): do not open the reader — the file is intentionally malformed
writer.finish(false);
}
// A validating open must reject the out-of-order sstable.
try {
SSTableReader.open(desc, cfs.metadata);
fail("SSTR validation should have caught the out-of-order rows");
} catch (IllegalStateException ise) {
/* this is expected */
}
// open without validation for scrubbing
Set<Component> components = new HashSet<>();
if (new File(desc.filenameFor(Component.COMPRESSION_INFO)).exists())
components.add(Component.COMPRESSION_INFO);
components.add(Component.DATA);
components.add(Component.PRIMARY_INDEX);
components.add(Component.FILTER);
components.add(Component.STATS);
components.add(Component.SUMMARY);
components.add(Component.TOC);
SSTableReader sstable = SSTableReader.openNoValidation(desc, components, cfs);
// Out-of-order data can leave last < first; patch it so the scrubber can proceed.
if (sstable.last.compareTo(sstable.first) < 0)
sstable.last = sstable.first;
try (LifecycleTransaction scrubTxn = LifecycleTransaction.offline(OperationType.SCRUB, sstable);
Scrubber scrubber = new Scrubber(cfs, scrubTxn, false, true)) {
scrubber.scrub();
}
LifecycleTransaction.waitForDeletions();
cfs.loadNewSSTables();
// All 7 partitions must survive the scrub, now in proper order.
assertOrderedAll(cfs, 7);
} finally {
FileUtils.deleteRecursive(tempDataDir);
// reset partitioner
DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
}
}
Example usage of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class SSTableWriterTest, method testAbortTxnWithClosedWriterShouldRemoveSSTable.
@Test
// Verifies that aborting the lifecycle transaction deletes the sstable's files
// from disk even though the writer itself has already been finished and closed.
public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws InterruptedException {
    ColumnFamilyStore store = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF);
    truncate(store);
    File sstableDir = store.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction streamTxn = LifecycleTransaction.offline(OperationType.STREAM);
    try (SSTableWriter sstWriter = getWriter(store, sstableDir, streamTxn)) {
        // Append 20k partitions of 100 rows each; sanity-check on-disk files halfway through.
        for (int pk = 0; pk < 20000; pk++) {
            if (pk == 10000)
                assertFileCounts(sstableDir.list());
            UpdateBuilder rowBuilder = UpdateBuilder.create(store.metadata(), random(pk, 10)).withTimestamp(1);
            for (int row = 0; row < 100; row++)
                rowBuilder.newRow("" + row).add("val", ByteBuffer.allocate(1000));
            sstWriter.append(rowBuilder.build().unfilteredIterator());
        }
        // finish(true) opens the final reader; exactly one data file should exist.
        SSTableReader finished = sstWriter.finish(true);
        int dataFileCount = assertFileCounts(sstableDir.list());
        assertEquals(dataFileCount, 1);
        finished.selfRef().release();
        // File counts remain stable until abort() runs (deletion timing differs on Windows).
        if (!FBUtilities.isWindows) {
            LifecycleTransaction.waitForDeletions();
            assertFileCounts(sstableDir.list());
        }
        // Aborting the transaction must remove the finished sstable from disk.
        streamTxn.abort();
        LifecycleTransaction.waitForDeletions();
        dataFileCount = assertFileCounts(sstableDir.list());
        assertEquals(dataFileCount, 0);
        validateCFS(store);
    }
}
Example usage of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class SSTableWriterTest, method testValueTooBigCorruption.
@Test
// Writes a value exceeding the table's configured maximum value size and verifies
// that reading it back surfaces a CorruptSSTableException.
public void testValueTooBigCorruption() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_SMALL_MAX_VALUE);
    truncate(cfs);
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    try (SSTableWriter writer1 = getWriter(cfs, dir, txn)) {
        // 2 MiB value — larger than CF_SMALL_MAX_VALUE's permitted maximum.
        UpdateBuilder largeValue = UpdateBuilder.create(cfs.metadata(), "large_value").withTimestamp(1);
        largeValue.newRow("clustering").add("val", ByteBuffer.allocate(2 * 1024 * 1024));
        writer1.append(largeValue.build().unfilteredIterator());
        SSTableReader sstable = writer1.finish(true);
        txn.update(sstable, false);
        DecoratedKey dk = Util.dk("large_value");
        // FIX: UnfilteredRowIterator is AutoCloseable but was never closed, leaking the
        // underlying file handle; manage it with try-with-resources.
        try (UnfilteredRowIterator rowIter = sstable.iterator(dk, Slices.ALL, ColumnFilter.all(cfs.metadata()), false)) {
            while (rowIter.hasNext()) {
                rowIter.next();
                // no-op read, as values may not appear expected
            }
            fail("Expected a CorruptSSTableException to be thrown");
        } catch (CorruptSSTableException e) {
            // expected: the oversized value is reported as corruption on read
        }
        txn.abort();
        LifecycleTransaction.waitForDeletions();
    }
}
Example usage of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class SSTableFlushObserverTest, method testFlushObserver.
@Test
// Writes three partitions through a BigTableWriter with a FlushObserver attached and
// verifies the observer saw every partition/cell, cross-checking observed index
// positions against the sstable's primary index.
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME).addPartitionKeyColumn("id", UTF8Type.instance).addRegularColumn("first_name", UTF8Type.instance).addRegularColumn("age", Int32Type.instance).addRegularColumn("height", LongType.instance).build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    // FIX: use File.separator (the path-component separator, '/' on Unix) instead of
    // File.pathSeparator (the PATH-list separator, ':'), which glued the keyspace and
    // table names into one oddly named directory instead of a ks/cf hierarchy.
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat), 10L, 0L, null, TableMetadataRef.forOfflineTools(cfm), new MetadataCollector(cfm.comparator).sstableLevel(0), new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS), Collections.singletonList(observer), transaction);
    SSTableReader reader = null;
    // Tracks the cells we wrote per partition key, for comparison with what the observer saw.
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)), BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")), BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    // For each observed (key, indexPosition) pair, confirm the primary index holds the same key.
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
Example usage of org.apache.cassandra.db.lifecycle.LifecycleTransaction in the Apache Cassandra project: class SSTableReaderTest, method testIndexSummaryUpsampleAndReload0.
// Upsamples an sstable's index summary via a tracker transaction, then reopens the
// sstable from disk and checks the new sampling level was persisted.
private void testIndexSummaryUpsampleAndReload0() throws Exception {
    Keyspace ks = Keyspace.open(KEYSPACE1);
    // index interval of 8, no key caching
    final ColumnFamilyStore cfs = ks.getColumnFamilyStore("StandardLowIndexInterval");
    CompactionManager.instance.disableAutoCompaction();
    final int PARTITION_COUNT = 512;
    // Populate enough partitions for the index summary to matter, then compact to one sstable.
    for (int i = 0; i < PARTITION_COUNT; i++)
        new RowUpdateBuilder(cfs.metadata(), i, String.format("%3d", i)).clustering("0").add("val", String.format("%3d", i)).build().applyUnsafe();
    cfs.forceBlockingFlush();
    CompactionManager.instance.performMaximal(cfs, false);
    Collection<SSTableReader> live = cfs.getLiveSSTables();
    assert live.size() == 1;
    final SSTableReader original = live.iterator().next();
    // Swap in a clone with a higher summary sampling level under a tracker transaction.
    try (LifecycleTransaction modTxn = cfs.getTracker().tryModify(Arrays.asList(original), OperationType.UNKNOWN)) {
        SSTableReader upsampled = original.cloneWithNewSummarySamplingLevel(cfs, original.getIndexSummarySamplingLevel() + 1);
        modTxn.update(upsampled, true);
        modTxn.finish();
    }
    // A fresh open from the same descriptor must reflect the bumped sampling level.
    SSTableReader reloaded = SSTableReader.open(original.descriptor);
    assert reloaded.getIndexSummarySamplingLevel() == original.getIndexSummarySamplingLevel() + 1;
}
Aggregations