Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From class SSTableReaderTest, method testSpannedIndexPositions:
@Test
public void testSpannedIndexPositions() throws IOException {
    int originalMaxSegmentSize = MmappedRegions.MAX_SEGMENT_SIZE;
    // each index entry is ~11 bytes, so this will generate lots of segments
    MmappedRegions.MAX_SEGMENT_SIZE = 40;
    try {
        Keyspace keyspace = Keyspace.open(KEYSPACE1);
        ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
        partitioner = store.getPartitioner();
        // insert a bunch of data and compact to a single sstable
        CompactionManager.instance.disableAutoCompaction();
        for (int j = 0; j < 100; j += 2) {
            new RowUpdateBuilder(store.metadata(), j, String.valueOf(j)).clustering("0").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
        }
        store.forceBlockingFlush();
        CompactionManager.instance.performMaximal(store, false);
        // check that all our keys are found correctly
        SSTableReader sstable = store.getLiveSSTables().iterator().next();
        for (int j = 0; j < 100; j += 2) {
            DecoratedKey dk = Util.dk(String.valueOf(j));
            FileDataInput file = sstable.getFileDataInput(sstable.getPosition(dk, SSTableReader.Operator.EQ).position);
            DecoratedKey keyInDisk = sstable.decorateKey(ByteBufferUtil.readWithShortLength(file));
            assert keyInDisk.equals(dk) : String.format("%s != %s in %s", keyInDisk, dk, file.getPath());
        }
        // check no false positives
        for (int j = 1; j < 110; j += 2) {
            DecoratedKey dk = Util.dk(String.valueOf(j));
            assert sstable.getPosition(dk, SSTableReader.Operator.EQ) == null;
        }
    } finally {
        MmappedRegions.MAX_SEGMENT_SIZE = originalMaxSegmentSize;
    }
}
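The lookup pattern this test exercises can be reduced to the sketch below. It assumes, as the test does, a flushed and compacted table `store` and a DecoratedKey `dk` built with the table's partitioner; the RowIndexEntry type named here is an assumption about the branch in use, since the test itself only dereferences `.position`.

// Minimal sketch: exact-match position lookup against a single live sstable.
SSTableReader sstable = store.getLiveSSTables().iterator().next();
RowIndexEntry entry = sstable.getPosition(dk, SSTableReader.Operator.EQ);
if (entry != null) {
    // entry.position is the partition's offset in the data file
    FileDataInput in = sstable.getFileDataInput(entry.position);
    DecoratedKey onDisk = sstable.decorateKey(ByteBufferUtil.readWithShortLength(in));
    assert onDisk.equals(dk);
}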
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From class SSTableReaderTest, method testGetPositionsForRangesWithKeyCache:
@Test
public void testGetPositionsForRangesWithKeyCache() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
    partitioner = store.getPartitioner();
    CacheService.instance.keyCache.setCapacity(100);
    // insert data and compact to a single sstable
    CompactionManager.instance.disableAutoCompaction();
    for (int j = 0; j < 10; j++) {
        new RowUpdateBuilder(store.metadata(), j, String.valueOf(j)).clustering("0").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    store.forceBlockingFlush();
    CompactionManager.instance.performMaximal(store, false);
    SSTableReader sstable = store.getLiveSSTables().iterator().next();
    // these point lookups also warm the key cache before the range query below
    long p2 = sstable.getPosition(k(2), SSTableReader.Operator.EQ).position;
    long p3 = sstable.getPosition(k(3), SSTableReader.Operator.EQ).position;
    long p6 = sstable.getPosition(k(6), SSTableReader.Operator.EQ).position;
    long p7 = sstable.getPosition(k(7), SSTableReader.Operator.EQ).position;
    Pair<Long, Long> p = sstable.getPositionsForRanges(makeRanges(t(2), t(6))).get(0);
    // ranges are start-exclusive, so we should start at 3
    assert p.left == p3;
    // to capture 6 we have to stop at the start of 7
    assert p.right == p7;
}
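Note that k(...), t(...) and makeRanges(...) are helpers defined in the test class. Roughly, makeRanges(t(2), t(6)) builds a single token Range that is start-exclusive and end-inclusive; a sketch of the equivalent call without the helpers, reusing the `partitioner` and `sstable` from above, might look like this:

// Sketch: data-file positions covered by the token range (2, 6] of a single sstable.
Token left = partitioner.getToken(ByteBufferUtil.bytes("2"));
Token right = partitioner.getToken(ByteBufferUtil.bytes("6"));
Collection<Range<Token>> ranges = Collections.singleton(new Range<>(left, right));
Pair<Long, Long> bounds = sstable.getPositionsForRanges(ranges).get(0);
// bounds.left is the position of key 3 (the range is start-exclusive) and
// bounds.right is the position of key 7, the first key past the range,
// matching the assertions in the test above.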
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From class SSTableReaderTest, method testIndexSummaryUpsampleAndReload0:
private void testIndexSummaryUpsampleAndReload0() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    // index interval of 8, no key caching
    final ColumnFamilyStore store = keyspace.getColumnFamilyStore("StandardLowIndexInterval");
    CompactionManager.instance.disableAutoCompaction();
    final int NUM_PARTITIONS = 512;
    for (int j = 0; j < NUM_PARTITIONS; j++) {
        new RowUpdateBuilder(store.metadata(), j, String.format("%3d", j)).clustering("0").add("val", String.format("%3d", j)).build().applyUnsafe();
    }
    store.forceBlockingFlush();
    CompactionManager.instance.performMaximal(store, false);
    Collection<SSTableReader> sstables = store.getLiveSSTables();
    assert sstables.size() == 1;
    final SSTableReader sstable = sstables.iterator().next();
    try (LifecycleTransaction txn = store.getTracker().tryModify(Arrays.asList(sstable), OperationType.UNKNOWN)) {
        SSTableReader replacement = sstable.cloneWithNewSummarySamplingLevel(store, sstable.getIndexSummarySamplingLevel() + 1);
        txn.update(replacement, true);
        txn.finish();
    }
    SSTableReader reopen = SSTableReader.open(sstable.descriptor);
    assert reopen.getIndexSummarySamplingLevel() == sstable.getIndexSummarySamplingLevel() + 1;
}
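The interesting part of this test is the replacement pattern: the re-sampled reader is swapped in under a LifecycleTransaction, and because the new sampling level is persisted with the summary, a fresh SSTableReader.open(...) observes it. A condensed sketch of that swap, using only calls that appear in the test (with `store` and `sstable` as above):

// Sketch: swap a live sstable for a copy at a higher index-summary sampling level.
try (LifecycleTransaction txn = store.getTracker().tryModify(Arrays.asList(sstable), OperationType.UNKNOWN)) {
    SSTableReader resampled = sstable.cloneWithNewSummarySamplingLevel(store, sstable.getIndexSummarySamplingLevel() + 1);
    // register the replacement reader with the transaction, then commit
    txn.update(resampled, true);
    txn.finish();
}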
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From class SSTableReaderTest, method testLoadingSummaryUsesCorrectPartitioner:
@Test
public void testLoadingSummaryUsesCorrectPartitioner() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Indexed1");
    new RowUpdateBuilder(store.metadata(), System.currentTimeMillis(), "k1").clustering("0").add("birthdate", 1L).build().applyUnsafe();
    store.forceBlockingFlush();
    for (ColumnFamilyStore indexCfs : store.indexManager.getAllIndexColumnFamilyStores()) {
        assert indexCfs.isIndex();
        SSTableReader sstable = indexCfs.getLiveSSTables().iterator().next();
        assert sstable.first.getToken() instanceof LocalToken;
        sstable.saveSummary();
        SSTableReader reopened = SSTableReader.open(sstable.descriptor);
        assert reopened.first.getToken() instanceof LocalToken;
        reopened.selfRef().release();
    }
}
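One detail worth noting: a reader obtained from SSTableReader.open(...) is not managed by the store's tracker, which is presumably why the test releases it explicitly. The same open/check/release pattern written with an explicit try/finally (a sketch, with `indexCfs` as in the loop above):

// Sketch: manually reopen a reader from its descriptor and drop our reference when done.
SSTableReader sstable = indexCfs.getLiveSSTables().iterator().next();
sstable.saveSummary();
SSTableReader reopened = SSTableReader.open(sstable.descriptor);
try {
    // secondary-index tables are keyed by the LocalPartitioner, hence the LocalToken
    assert reopened.first.getToken() instanceof LocalToken;
} finally {
    reopened.selfRef().release();
}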
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From class SSTableRewriterTest, method testNumberOfFiles_dont_clean_readers:
@Test
public void testNumberOfFiles_dont_clean_readers() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> compacting = Sets.newHashSet(s);
    List<SSTableReader> sstables;
    int files = 1;
    try (ISSTableScanner scanner = s.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
                files++;
                // we have one original file plus the ones we have switched out.
                assertEquals(cfs.getLiveSSTables().size(), files);
            }
        }
        sstables = rewriter.finish();
    }
    assertEquals(files, sstables.size());
    assertEquals(files, cfs.getLiveSSTables().size());
    LifecycleTransaction.waitForDeletions();
    assertFileCounts(s.descriptor.directory.list());
    validateCFS(cfs);
}
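Stripped of the segment-size bookkeeping and the mid-loop assertions, the rewrite the test drives reduces to the sketch below; `cfs`, the source reader `s`, and the test class's own getWriter(...) helper are assumed as above.

// Sketch: stream one sstable through a CompactionIterator into a replacement writer.
Set<SSTableReader> compacting = Sets.newHashSet(s);
try (ISSTableScanner scanner = s.getScanner();
     CompactionController controller = new CompactionController(cfs, compacting, 0);
     LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
     SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 10000000, false);
     CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
    rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
    while (ci.hasNext())
        rewriter.append(ci.next());
    // finish() makes the rewritten sstable(s) live and replaces the source `s` in the
    // tracker, as the assertions in the test above verify
    List<SSTableReader> rewritten = rewriter.finish();
}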