Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
Class SSTableRewriterTest, method testCanonicalView.
@Test
public void testCanonicalView() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = Sets.newHashSet(s);
    assertEquals(1, sstables.size());
    boolean checked = false;
    try (ISSTableScanner scanner = sstables.iterator().next().getScanner();
         CompactionController controller = new CompactionController(cfs, sstables, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN);
         SSTableRewriter writer = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory, txn));
        while (ci.hasNext()) {
            writer.append(ci.next());
            if (!checked && writer.currentWriter().getFilePointer() > 15000000) {
                checked = true;
                ColumnFamilyStore.ViewFragment viewFragment = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
                // canonical view should have only one SSTable which is not opened early.
                assertEquals(1, viewFragment.sstables.size());
                SSTableReader sstable = viewFragment.sstables.get(0);
                assertEquals(s.descriptor, sstable.descriptor);
                assertTrue("Found early opened SSTable in canonical view: " + sstable.getFilename(),
                           sstable.openReason != SSTableReader.OpenReason.EARLY);
            }
        }
    }
    truncateCF();
    validateCFS(cfs);
}
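The assertion above is the essence of the canonical-view contract: while SSTableRewriter is incrementally opening partially written files, those early-opened readers may appear in the live set, but the canonical view must only ever expose fully committed sstables. Below is a minimal sketch of that check factored into a reusable assertion; the helper name assertNoEarlyOpenInCanonicalView is hypothetical, but every call it makes (cfs.select, View.selectFunction, SSTableReader.openReason, getFilename) is taken from the test above.

private static void assertNoEarlyOpenInCanonicalView(ColumnFamilyStore cfs) {
    ColumnFamilyStore.ViewFragment canonical = cfs.select(View.selectFunction(SSTableSet.CANONICAL));
    for (SSTableReader reader : canonical.sstables) {
        // early-opened readers expose partially written data; the canonical view,
        // which backs operations such as streaming and validation, must never include them
        assertTrue("early-opened reader leaked into canonical view: " + reader.getFilename(),
                   reader.openReason != SSTableReader.OpenReason.EARLY);
    }
}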
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
Class SSTableRewriterTest, method testNumberOfFiles_truncate.
@Test
public void testNumberOfFiles_truncate() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out.
                assertEquals(cfs.getLiveSSTables().size(), files);
            }
        }
        sstables = rewriter.finish();
    }
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
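The loop above is the standard SSTableRewriter pattern: mark the inputs as compacting via tryModify, feed merged rows through append, rotate to a fresh writer whenever the current output grows past a threshold, and let finish() atomically swap the inputs for the outputs. A minimal sketch of that loop factored into a helper, assuming the same test scope (getWriter is the surrounding test class's helper, and maxOnDiskBytes is an illustrative parameter):

private List<SSTableReader> rewriteWithRotation(ColumnFamilyStore cfs, SSTableRewriter rewriter,
                                                CompactionIterator ci, LifecycleTransaction txn,
                                                File directory, long maxOnDiskBytes) {
    rewriter.switchWriter(getWriter(cfs, directory, txn));
    while (ci.hasNext()) {
        rewriter.append(ci.next());
        // rotate to a fresh writer once the on-disk size crosses the threshold,
        // splitting the output into several similarly sized sstables
        if (rewriter.currentWriter().getOnDiskFilePointer() > maxOnDiskBytes)
            rewriter.switchWriter(getWriter(cfs, directory, txn));
    }
    // finish() atomically replaces the compacted inputs with the new outputs
    return rewriter.finish();
}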
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
Class SSTableMetadataTest, method trackMaxMinColNames.
@Test
public void trackMaxMinColNames() throws CharacterCodingException, ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard3");
    // write 8 partitions whose clustering values range from "0col100" to "7col149"
    for (int j = 0; j < 8; j++) {
        String key = "row" + j;
        for (int i = 100; i < 150; i++) {
            new RowUpdateBuilder(store.metadata(), System.currentTimeMillis(), key)
                .clustering(j + "col" + i)
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .applyUnsafe();
        }
    }
    store.forceBlockingFlush();
    assertEquals(1, store.getLiveSSTables().size());
    // the flushed sstable's metadata must track the min and max clustering values seen
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)), "0col100");
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)), "7col149");
    }
    // extend the range upward with clusterings "9col101" .. "9col298" in a second sstable
    String key = "row2";
    for (int i = 101; i < 299; i++) {
        new RowUpdateBuilder(store.metadata(), System.currentTimeMillis(), key)
            .clustering(9 + "col" + i)
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    }
    store.forceBlockingFlush();
    store.forceMajorCompaction();
    assertEquals(1, store.getLiveSSTables().size());
    // after compaction the merged sstable must carry the widened clustering bounds
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)), "0col100");
        assertEquals(ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)), "9col298");
    }
}
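The assertions work because the sstable's stats metadata records, per clustering column, the minimum and maximum values seen across all rows, and compaction merges those bounds. A minimal sketch of reading them back directly, using only the accessors the test itself uses (getSSTableMetadata, minClusteringValues, maxClusteringValues, ByteBufferUtil.string); StatsMetadata is the type getSSTableMetadata() returns:

SSTableReader sstable = store.getLiveSSTables().iterator().next();
StatsMetadata stats = sstable.getSSTableMetadata();
String min = ByteBufferUtil.string(stats.minClusteringValues.get(0));
String max = ByteBufferUtil.string(stats.maxClusteringValues.get(0));
// after the major compaction above, this prints: clustering range: [0col100, 9col298]
System.out.println("clustering range: [" + min + ", " + max + "]");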
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
Class SSTableRewriterTest, method testSmallFiles.
@Test
public void testSmallFiles() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 400);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 2500000) {
                // all files are now opened early
                assertEquals(files, cfs.getLiveSSTables().size());
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
            }
        }
        sstables = rewriter.finish();
    }
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
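Note the inverse of testCanonicalView: here the test asserts that early-opened outputs do show up in getLiveSSTables() while the rewrite is still in flight. A minimal sketch contrasting the two views; liveVersusCanonical is a hypothetical helper name, but both lookups are calls used in these tests:

private static String liveVersusCanonical(ColumnFamilyStore cfs) {
    // the LIVE set grows as writers are switched out and their outputs opened early...
    int live = cfs.getLiveSSTables().size();
    // ...while the CANONICAL set keeps reporting only fully committed sstables
    int canonical = cfs.select(View.selectFunction(SSTableSet.CANONICAL)).sstables.size();
    return "live=" + live + ", canonical=" + canonical;
}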
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
Class SSTableRewriterTest, method testSSTableSplit.
@Test
public void testSSTableSplit() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.disableAutoCompaction();
    SSTableReader s = writeFile(cfs, 1000);
    try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.UNKNOWN, s)) {
        SSTableSplitter splitter = new SSTableSplitter(cfs, txn, 10);
        splitter.split();
        assertFileCounts(s.descriptor.directory.list());
        LifecycleTransaction.waitForDeletions();
        for (File f : s.descriptor.directory.listFiles()) {
            // we need to clear out the data dir, otherwise tests running after this break
            FileUtils.deleteRecursive(f);
        }
    }
    truncate(cfs);
}
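Because the split runs against an offline LifecycleTransaction, it never registers with the tracker, mirroring the standalone sstablesplit tool. A minimal sketch of the pattern, assuming the same test scope; splitOffline is a hypothetical helper name, and the interpretation of the size argument as megabytes per output sstable is an assumption worth checking against the SSTableSplitter constructor:

private static void splitOffline(ColumnFamilyStore cfs, SSTableReader sstable) {
    // an offline transaction takes ownership of the reader without touching the tracker
    try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.UNKNOWN, sstable)) {
        new SSTableSplitter(cfs, txn, 10).split();
    }
    // the original file is deleted asynchronously; wait before asserting on the directory
    LifecycleTransaction.waitForDeletions();
}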