Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project:
class SSTableRewriterTest, method testAllKeysReadable.
@Test
public void testAllKeysReadable() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);

    // Populate 100 partitions of 10 rows each, then flush and major-compact
    // so the data ends up in a single sstable.
    for (int partition = 0; partition < 100; partition++)
    {
        String key = Integer.toString(partition);
        for (int row = 0; row < 10; row++)
        {
            new RowUpdateBuilder(cfs.metadata(), 100, key)
                .clustering(Integer.toString(row))
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .apply();
        }
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    validateKeys(keyspace);

    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    Set<SSTableReader> compacting = new HashSet<>();
    compacting.add(sstable);

    int appended = 0;
    // Rewrite the sstable, switching to a fresh writer every 10 partitions, and
    // verify after every append that all keys remain readable mid-rewrite.
    try (ISSTableScanner scanner = sstable.getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
    {
        rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory, txn));
        while (ci.hasNext())
        {
            rewriter.append(ci.next());
            if (appended % 10 == 0)
                rewriter.switchWriter(getWriter(cfs, sstable.descriptor.directory, txn));
            appended++;
            validateKeys(keyspace);
        }
        rewriter.finish();
    }
    validateKeys(keyspace);
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    truncate(cfs);
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project:
class IndexSummaryManagerTest, method testRebuildAtSamplingLevel.
@Test
public void testRebuildAtSamplingLevel() throws IOException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARDLOWiINTERVAL);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();

    // Write 256 single-column partitions and flush them into exactly one sstable.
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    int numRows = 256;
    for (int row = 0; row < numRows; row++)
    {
        new RowUpdateBuilder(cfs.metadata(), 0, String.format("%3d", row))
            .clustering("column")
            .add("val", value)
            .build()
            .applyUnsafe();
    }
    cfs.forceBlockingFlush();

    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    SSTableReader sstable = sstables.get(0);

    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN))
    {
        // Rebuild the index summary at every sampling level below the base level and
        // check that the summary size scales proportionally to the level.
        for (int samplingLevel = 1; samplingLevel < BASE_SAMPLING_LEVEL; samplingLevel++)
        {
            sstable = sstable.cloneWithNewSummarySamplingLevel(cfs, samplingLevel);
            assertEquals(samplingLevel, sstable.getIndexSummarySamplingLevel());
            int expectedSize = (numRows * samplingLevel) / (cfs.metadata().params.minIndexInterval * BASE_SAMPLING_LEVEL);
            assertEquals(expectedSize, sstable.getIndexSummarySize(), 1);
            txn.update(sstable, true);
            txn.checkpoint();
        }
        txn.finish();
    }
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project:
class IndexSummaryManagerTest, method createSSTables.
/**
 * Truncates the given table, then writes {@code numSSTables} batches of
 * {@code numPartition} single-row partitions, flushing after each batch so that
 * exactly {@code numSSTables} live sstables result. Blocks until every flush
 * completes, then validates the sstable count and the written data.
 */
private void createSSTables(String ksname, String cfname, int numSSTables, int numPartition)
{
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    // Future<?> instead of the raw Future type; presized to the number of flushes.
    List<Future<?>> futures = new ArrayList<>(numSSTables);
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    for (int sstable = 0; sstable < numSSTables; sstable++)
    {
        // Each batch rewrites the same partition keys; the flush below freezes
        // the batch into its own sstable.
        for (int p = 0; p < numPartition; p++)
        {
            String key = String.format("%3d", p);
            new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
        }
        futures.add(cfs.forceFlush());
    }
    for (Future<?> future : futures)
    {
        try
        {
            future.get();
        }
        catch (InterruptedException e)
        {
            // Restore the interrupt flag before converting to an unchecked failure.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
        catch (ExecutionException e)
        {
            throw new RuntimeException(e);
        }
    }
    assertEquals(numSSTables, cfs.getLiveSSTables().size());
    validateData(cfs, numPartition);
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project:
class IndexSummaryManagerTest, method testChangeMinIndexInterval.
/**
 * Verifies that index summaries are resampled correctly when min_index_interval
 * (and later max_index_interval) is changed, under varying memory budgets for the
 * summary redistribution.
 */
@Test
public void testChangeMinIndexInterval() throws IOException {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    int numSSTables = 1;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    // A hot read meter so redistribution treats the sstable as worth full sampling.
    for (SSTableReader sstable : sstables) sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    for (SSTableReader sstable : sstables) assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
    // double the min_index_interval
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 2).build(), true);
    IndexSummaryManager.instance.redistributeSummaries();
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        assertEquals(numRows / cfs.metadata().params.minIndexInterval, sstable.getIndexSummarySize());
    }
    // return min_index_interval to its original value
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
    IndexSummaryManager.instance.redistributeSummaries();
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
        assertEquals(numRows / cfs.metadata().params.minIndexInterval, sstable.getIndexSummarySize());
    }
    // halve the min_index_interval, but constrain the available space to exactly what we have now; as a result,
    // the summary shouldn't change
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval / 2).build(), true);
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    long summarySpace = sstable.getIndexSummaryOffHeapSize();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        // Collections.emptyList() infers the element type; the raw EMPTY_LIST constant caused unchecked warnings.
        redistributeSummaries(Collections.emptyList(), of(cfs.metadata.id, txn), summarySpace);
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(originalMinIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
    assertEquals(numRows / originalMinIndexInterval, sstable.getIndexSummarySize());
    // keep the min_index_interval the same, but now give the summary enough space to grow by 50%
    double previousInterval = sstable.getEffectiveIndexInterval();
    int previousSize = sstable.getIndexSummarySize();
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.emptyList(), of(cfs.metadata.id, txn), (long) Math.ceil(summarySpace * 1.5));
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(previousSize * 1.5, (double) sstable.getIndexSummarySize(), 1);
    assertEquals(previousInterval * (1.0 / 1.5), sstable.getEffectiveIndexInterval(), 0.001);
    // return min_index_interval to it's original value (double it), but only give the summary enough space
    // to have an effective index interval of twice the new min
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.emptyList(), of(cfs.metadata.id, txn), (long) Math.ceil(summarySpace / 2.0));
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(originalMinIndexInterval * 2, sstable.getEffectiveIndexInterval(), 0.001);
    assertEquals(numRows / (originalMinIndexInterval * 2), sstable.getIndexSummarySize());
    // raise the min_index_interval above our current effective interval, but set the max_index_interval lower
    // than what we actually have space for (meaning the index summary would ideally be smaller, but this would
    // result in an effective interval above the new max)
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().minIndexInterval(originalMinIndexInterval * 4).build(), true);
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().maxIndexInterval(originalMinIndexInterval * 4).build(), true);
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        redistributeSummaries(Collections.emptyList(), of(cfs.metadata.id, txn), 10);
    }
    sstable = cfs.getLiveSSTables().iterator().next();
    assertEquals(cfs.metadata().params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project:
class LocalSyncTaskTest, method testDifference.
@Test
public void testDifference() throws Throwable
{
    UUID parentRepairSession = UUID.randomUUID();
    Range<Token> range = new Range<>(partirioner.getMinimumToken(), partirioner.getRandomToken());
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
    ActiveRepairService.instance.registerParentRepairSession(parentRepairSession, FBUtilities.getBroadcastAddress(), Arrays.asList(cfs), Arrays.asList(range), false, ActiveRepairService.UNREPAIRED_SSTABLE, false);
    RepairJobDesc desc = new RepairJobDesc(parentRepairSession, UUID.randomUUID(), KEYSPACE1, "Standard1", Arrays.asList(range));

    MerkleTrees changedTree = createInitialTree(desc);
    MerkleTrees unchangedTree = createInitialTree(desc);

    // Invalidate one range in the first tree and give it a distinct hash so the
    // trees differ in exactly that range.
    Token midpoint = partirioner.midpoint(range.left, range.right);
    changedTree.invalidate(midpoint);
    MerkleTree.TreeRange changed = changedTree.get(midpoint);
    changed.hash("non-empty hash!".getBytes());

    Set<Range<Token>> expectedDifferences = new HashSet<>();
    expectedDifferences.add(changed);

    // Difference the trees.
    // note: we reuse the same endpoint which is bogus in theory but fine here
    TreeResponse r1 = new TreeResponse(InetAddress.getByName("127.0.0.1"), changedTree);
    TreeResponse r2 = new TreeResponse(InetAddress.getByName("127.0.0.2"), unchangedTree);
    LocalSyncTask task = new LocalSyncTask(desc, r1, r2, ActiveRepairService.UNREPAIRED_SSTABLE, null, false);
    task.run();

    // ensure that the changed range was recorded
    assertEquals("Wrong differing ranges", expectedDifferences.size(), task.getCurrentStat().numberOfDifferences);
}
Aggregations