Example usage of org.apache.cassandra.UpdateBuilder in the Apache Cassandra project:
class LongLeveledCompactionStrategyTest, method testParallelLeveledCompaction.
/**
 * Verifies that leveled compaction (LCS) keeps its manifest consistent when
 * background compaction tasks are executed concurrently on a thread pool.
 *
 * Writes enough 100 KB-valued rows to populate several levels, drains all
 * pending LCS tasks through a 4-thread executor, then asserts that every
 * level is under its size limit, every sstable carries its level number,
 * and sstables in L1+ do not overlap.
 */
@Test
public void testParallelLeveledCompaction() throws Exception {
    String ksname = KEYSPACE1;
    String cfname = "StandardLeveled";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(cfname);
    store.disableAutoCompaction();
    CompactionStrategyManager mgr = store.getCompactionStrategyManager();
    LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) mgr.getStrategies().get(1).get(0);

    // 100 KB value: makes it easy to produce multiple sstables per level.
    ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
    // Enough data to have a level 1 and 2.
    int rows = 128;
    int columns = 10;

    // Add enough data to trigger multiple sstables per level; flush after
    // every partition so each row lands in its own sstable.
    for (int r = 0; r < rows; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        UpdateBuilder builder = UpdateBuilder.create(store.metadata(), key);
        for (int c = 0; c < columns; c++)
            builder.newRow("column" + c).add("val", value);
        Mutation rm = new Mutation(builder.build());
        rm.apply();
        store.forceBlockingFlush();
    }

    // Execute LCS in parallel: repeatedly collect every available background
    // task, run the batch on the pool, and loop until no task remains.
    ExecutorService executor = new ThreadPoolExecutor(4, 4, Long.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>());
    try {
        List<Runnable> tasks = new ArrayList<Runnable>();
        while (true) {
            while (true) {
                final AbstractCompactionTask nextTask = lcs.getNextBackgroundTask(Integer.MIN_VALUE);
                if (nextTask == null)
                    break;
                tasks.add(new Runnable() {
                    public void run() {
                        nextTask.execute(null);
                    }
                });
            }
            if (tasks.isEmpty())
                break;
            List<Future<?>> futures = new ArrayList<Future<?>>(tasks.size());
            for (Runnable r : tasks)
                futures.add(executor.submit(r));
            FBUtilities.waitOnFutures(futures);
            tasks.clear();
        }
    } finally {
        // FIX: the executor was previously never shut down, leaking its four
        // worker threads for the lifetime of the test JVM.
        executor.shutdown();
    }

    // Assert all SSTables are lined up correctly.
    LeveledManifest manifest = lcs.manifest;
    int levels = manifest.getLevelCount();
    for (int level = 0; level < levels; level++) {
        List<SSTableReader> sstables = manifest.getLevel(level);
        // Score check: total bytes in the level must stay below its max.
        assert (double) SSTableReader.getTotalBytes(sstables) / manifest.maxBytesForLevel(level, 1 * 1024 * 1024) < 1.00;
        for (SSTableReader sstable : sstables) {
            // Each sstable must record the level it is placed in.
            assert level == sstable.getSSTableLevel();
            if (level > 0) {
                // Overlap check for levels greater than 0: the only sstable
                // overlapping an sstable's token range must be itself.
                Set<SSTableReader> overlaps = LeveledManifest.overlapping(sstable.first.getToken(), sstable.last.getToken(), sstables);
                assert overlaps.size() == 1 && overlaps.contains(sstable);
            }
        }
    }
}
Example usage of org.apache.cassandra.UpdateBuilder in the Apache Cassandra project:
class RangeTombstoneTest, method simpleQueryWithRangeTombstoneTest.
/**
 * Exercises reads against a partition that carries two overlapping range
 * tombstones — one flushed to an sstable, one still in the memtable — and
 * checks liveness both for by-name (point) queries and slice queries.
 */
@Test
public void simpleQueryWithRangeTombstoneTest() throws Exception {
    Keyspace keyspace = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CFNAME);

    // Write even rows 0..38 at timestamp 0 and flush them to an sstable.
    String key = "k1";
    UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int row = 0; row < 40; row += 2)
        update.newRow(row).add("val", row);
    update.applyUnsafe();
    cfs.forceBlockingFlush();

    // Range tombstone [10,22] at timestamp 1, then odd rows 1..39 at
    // timestamp 2, then a second tombstone [19,27] at timestamp 3.
    new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(10, 22).build().applyUnsafe();
    update = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(2);
    for (int row = 1; row < 40; row += 2)
        update.newRow(row).add("val", row);
    update.applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), 3, key).addRangeTombstone(19, 27).build().applyUnsafe();
    // Deliberately no flush here: this leaves one range tombstone in the
    // memtable while the other is already in an sstable.

    // By-name queries: request a mix of rows we expect alive and deleted.
    int[] live = new int[] { 4, 9, 11, 17, 28 };
    int[] dead = new int[] { 12, 19, 21, 24, 27 };
    AbstractReadCommandBuilder.SinglePartitionBuilder cmdBuilder = Util.cmd(cfs, key);
    for (int row : live)
        cmdBuilder.includeRow(row);
    for (int row : dead)
        cmdBuilder.includeRow(row);
    Partition partition = Util.getOnlyPartitionUnfiltered(cmdBuilder.build());
    int nowInSec = FBUtilities.nowInSeconds();
    for (int row : live)
        assertTrue("Row " + row + " should be live", partition.getRow(Clustering.make(bb(row))).hasLiveData(nowInSec));
    for (int row : dead)
        assertFalse("Row " + row + " shouldn't be live", partition.getRow(Clustering.make(bb(row))).hasLiveData(nowInSec));

    // Slice query over [7, 30]: same liveness expectations over a range.
    partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).fromIncl(7).toIncl(30).build());
    for (int row : new int[] { 7, 8, 9, 11, 13, 15, 17, 28, 29, 30 })
        assertTrue("Row " + row + " should be live", partition.getRow(Clustering.make(bb(row))).hasLiveData(nowInSec));
    for (int row : new int[] { 10, 12, 14, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 })
        assertFalse("Row " + row + " shouldn't be live", partition.getRow(Clustering.make(bb(row))).hasLiveData(nowInSec));
}
Example usage of org.apache.cassandra.UpdateBuilder in the Apache Cassandra project:
class RangeTombstoneTest, method test7808_1.
// Regression test for CASSANDRA-7808 (scenario 1): a full-partition deletion
// that is already past gc_grace must not break major compaction. The test
// passes if no exception is thrown.
@Test
public void test7808_1() throws ExecutionException, InterruptedException {
Keyspace ks = Keyspace.open(KSNAME);
ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
// Shrink gc_grace to 2 seconds so the tombstone below is purgeable.
MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build(), true);
String key = "7808_1";
// Even rows 0..38 at timestamp 0, flushed to their own sstable.
UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
for (int i = 0; i < 40; i += 2) builder.newRow(i).add("val", i);
builder.apply();
cfs.forceBlockingFlush();
// Full-partition delete with timestamp 1 and local deletion time 1 —
// presumably seconds-since-epoch, which puts the tombstone far past
// gc_grace already, so compaction can purge it immediately; the 5 ms
// sleep below is just a scheduling pause, not a grace-period wait.
// NOTE(review): confirm the (1, 1) arguments are (timestamp, nowInSec).
new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 1, 1)).apply();
cfs.forceBlockingFlush();
Thread.sleep(5);
// Must complete without throwing despite the purgeable partition tombstone.
cfs.forceMajorCompaction();
}
Example usage of org.apache.cassandra.UpdateBuilder in the Apache Cassandra project:
class RangeTombstoneTest, method test7808_2.
/**
 * Regression test for CASSANDRA-7808 (scenario 2): after a full-partition
 * delete, a row written with a newer timestamp must survive a major
 * compaction that purges the partition tombstone.
 */
@Test
public void test7808_2() throws ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CFNAME);
    // Shrink gc_grace to 2 seconds so the partition tombstone is purgeable.
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build(), true);

    // Rows 10..19 at timestamp 0, flushed to an sstable.
    String key = "7808_2";
    UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int row = 10; row < 20; row++)
        update.newRow(row).add("val", row);
    update.apply();
    cfs.forceBlockingFlush();

    // Full-partition delete at timestamp 0, then row 5 at the newer
    // timestamp 1 — that row must outlive the deletion.
    new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 0, 0)).apply();
    UpdateBuilder.create(cfs.metadata(), key).withTimestamp(1).newRow(5).add("val", 5).apply();
    cfs.forceBlockingFlush();

    Thread.sleep(5);
    cfs.forceMajorCompaction();
    // Only row 5, written after the partition delete, should remain.
    assertEquals(1, Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build()).rowCount());
}
Example usage of org.apache.cassandra.UpdateBuilder in the Apache Cassandra project:
class RangeTombstoneTest, method overlappingRangeTest.
/**
 * Checks liveness with three overlapping range tombstones ([5,15], [5,10],
 * [5,8]) spread across separate sstables, both before and after a maximal
 * compaction merges them: rows 0..4 and 16..19 stay live, rows 5..15 are
 * shadowed.
 */
@Test
public void overlappingRangeTest() throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CFNAME);

    // Insert rows 0..19 at timestamp 0 and flush.
    String key = "k2";
    UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int i = 0; i < 20; i++)
        builder.newRow(i).add("val", i);
    builder.applyUnsafe();
    cfs.forceBlockingFlush();

    // Three overlapping range tombstones, each flushed to its own sstable.
    new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(5, 15).build().applyUnsafe();
    cfs.forceBlockingFlush();
    new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(5, 10).build().applyUnsafe();
    cfs.forceBlockingFlush();
    new RowUpdateBuilder(cfs.metadata(), 2, key).addRangeTombstone(5, 8).build().applyUnsafe();
    cfs.forceBlockingFlush();

    // Pre-compaction liveness checks.
    Partition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    int nowInSec = FBUtilities.nowInSeconds();
    for (int i = 0; i < 5; i++)
        assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec));
    for (int i = 16; i < 20; i++)
        assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec));
    for (int i = 5; i <= 15; i++)
        assertFalse("Row " + i + " shouldn't be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec));

    // Compact everything and re-test.
    CompactionManager.instance.performMaximal(cfs, false);
    partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    // FIX: the original recomputed nowInSeconds() inline in the two "live"
    // loops but reused the stale pre-compaction nowInSec in the "dead" loop;
    // use one consistent post-compaction timestamp for all three checks.
    nowInSec = FBUtilities.nowInSeconds();
    for (int i = 0; i < 5; i++)
        assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec));
    for (int i = 16; i < 20; i++)
        assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec));
    for (int i = 5; i <= 15; i++)
        assertFalse("Row " + i + " shouldn't be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec));
}
Aggregations