
Example 21 with UpdateBuilder

use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

the class RangeTombstoneTest method simpleQueryWithRangeTombstoneTest.

@Test
public void simpleQueryWithRangeTombstoneTest() throws Exception {
    Keyspace keyspace = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CFNAME);
    boolean enforceStrictLiveness = cfs.metadata().enforceStrictLiveness();
    // Inserting data
    String key = "k1";
    UpdateBuilder builder;
    builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int i = 0; i < 40; i += 2) builder.newRow(i).add("val", i);
    builder.applyUnsafe();
    cfs.forceBlockingFlush();
    // Range tombstone [10, 22] at timestamp 1: shadows the even rows written at timestamp 0.
    new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(10, 22).build().applyUnsafe();
    builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(2);
    for (int i = 1; i < 40; i += 2) builder.newRow(i).add("val", i);
    builder.applyUnsafe();
    // Range tombstone [19, 27] at timestamp 3: shadows both the even rows (timestamp 0) and the odd rows (timestamp 2) in that range.
    new RowUpdateBuilder(cfs.metadata(), 3, key).addRangeTombstone(19, 27).build().applyUnsafe();
    // We don't flush, so the test covers a range tombstone in the memtable as well as one in an sstable.
    // Queries by name
    int[] live = new int[] { 4, 9, 11, 17, 28 };
    int[] dead = new int[] { 12, 19, 21, 24, 27 };
    AbstractReadCommandBuilder.SinglePartitionBuilder cmdBuilder = Util.cmd(cfs, key);
    for (int i : live) cmdBuilder.includeRow(i);
    for (int i : dead) cmdBuilder.includeRow(i);
    Partition partition = Util.getOnlyPartitionUnfiltered(cmdBuilder.build());
    int nowInSec = FBUtilities.nowInSeconds();
    for (int i : live) assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
    for (int i : dead) assertFalse("Row " + i + " shouldn't be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
    // Queries by slices
    partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).fromIncl(7).toIncl(30).build());
    for (int i : new int[] { 7, 8, 9, 11, 13, 15, 17, 28, 29, 30 }) assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
    for (int i : new int[] { 10, 12, 14, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 }) assertFalse("Row " + i + " shouldn't be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
}
Also used : UpdateBuilder(org.apache.cassandra.UpdateBuilder) Test(org.junit.Test)
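
All of the examples on this page follow the same fluent pattern. Here is a minimal sketch distilled from the snippet above; KSNAME, CFNAME and the "val" column are taken from the test fixtures and are assumptions outside this page:

ColumnFamilyStore cfs = Keyspace.open(KSNAME).getColumnFamilyStore(CFNAME);
// Build an update for one partition key, stamp every row with the same timestamp,
// add rows keyed by clustering value, then apply the update locally.
UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), "k1").withTimestamp(0);
for (int i = 0; i < 10; i++)
    update.newRow(i).add("val", i);
// applyUnsafe() writes locally, as the tests above do; update.build() instead yields a
// PartitionUpdate that can be fed to an sstable writer (see Examples 24 and 25 below).
update.applyUnsafe();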

Example 22 with UpdateBuilder

use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

the class RangeTombstoneTest method overlappingRangeTest.

@Test
public void overlappingRangeTest() throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CFNAME);
    boolean enforceStrictLiveness = cfs.metadata().enforceStrictLiveness();
    // Inserting data
    String key = "k2";
    UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int i = 0; i < 20; i++) builder.newRow(i).add("val", i);
    builder.applyUnsafe();
    cfs.forceBlockingFlush();
    // Three overlapping range tombstones, each flushed into its own sstable so the merge path is exercised.
    new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(5, 15).build().applyUnsafe();
    cfs.forceBlockingFlush();
    new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(5, 10).build().applyUnsafe();
    cfs.forceBlockingFlush();
    new RowUpdateBuilder(cfs.metadata(), 2, key).addRangeTombstone(5, 8).build().applyUnsafe();
    cfs.forceBlockingFlush();
    Partition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    int nowInSec = FBUtilities.nowInSeconds();
    for (int i = 0; i < 5; i++) assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
    for (int i = 16; i < 20; i++) assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
    for (int i = 5; i <= 15; i++) assertFalse("Row " + i + " shouldn't be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
    // Compact everything and re-test
    CompactionManager.instance.performMaximal(cfs, false);
    partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    for (int i = 0; i < 5; i++) assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(FBUtilities.nowInSeconds(), enforceStrictLiveness));
    for (int i = 16; i < 20; i++) assertTrue("Row " + i + " should be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(FBUtilities.nowInSeconds(), enforceStrictLiveness));
    for (int i = 5; i <= 15; i++) assertFalse("Row " + i + " shouldn't be live", partition.getRow(Clustering.make(bb(i))).hasLiveData(nowInSec, enforceStrictLiveness));
}
Also used : UpdateBuilder(org.apache.cassandra.UpdateBuilder) Test(org.junit.Test)
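
The three deletions above overlap and carry different timestamps, and the asserts only check row liveness. To look at the merged deletions themselves, one could walk the unfiltered partition. This is a hedged sketch, not part of the original test, assuming Partition.unfilteredIterator() and Unfiltered.isRangeTombstoneMarker() from the Cassandra row APIs:

int markers = 0;
try (UnfilteredRowIterator iter = partition.unfilteredIterator())
{
    while (iter.hasNext())
        if (iter.next().isRangeTombstoneMarker())
            markers++;
}
// After merging, the three overlapping deletions collapse into a small number of open/close markers.
System.out.println(markers + " range tombstone markers survive the merge");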

Example 23 with UpdateBuilder

use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

the class RangeTombstoneTest method test7808_1.

@Test
public void test7808_1() throws ExecutionException, InterruptedException {
    Keyspace ks = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(CFNAME);
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(2).build(), true);
    String key = "7808_1";
    UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int i = 0; i < 40; i += 2) builder.newRow(i).add("val", i);
    builder.apply();
    cfs.forceBlockingFlush();
    // Delete the whole partition at timestamp 1, shadowing the rows written at timestamp 0.
    new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), Util.dk(key), 1, 1)).apply();
    cfs.forceBlockingFlush();
    Thread.sleep(5);
    // gc_grace_seconds was lowered to 2 above, so the major compaction exercises tombstone purging (CASSANDRA-7808).
    cfs.forceMajorCompaction();
}
Also used : UpdateBuilder(org.apache.cassandra.UpdateBuilder) Test(org.junit.Test)
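
Since the partition delete at timestamp 1 shadows every row written at timestamp 0, a natural follow-up check would be to read the key back and expect nothing live. This is hypothetical and not part of the original test, and it assumes the Util.assertEmpty helper used elsewhere in the test tree:

// A read of "7808_1" after the full-partition delete should return no live data.
Util.assertEmpty(Util.cmd(cfs, key).build());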

Example 24 with UpdateBuilder

use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

the class AntiCompactionTest method writeFile.

private SSTableReader writeFile(ColumnFamilyStore cfs, int count) {
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor desc = cfs.newSSTableDescriptor(dir);
    try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, NO_PENDING_REPAIR, false, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS))) {
        for (int i = 0; i < count; i++) {
            // 'metadata' here is a TableMetadata field of the surrounding test class, unlike the cfs.metadata() calls in the other examples.
            UpdateBuilder builder = UpdateBuilder.create(metadata, ByteBufferUtil.bytes(i));
            for (int j = 0; j < count * 5; j++) builder.newRow("c" + j).add("val", "value1");
            writer.append(builder.build().unfilteredIterator());
        }
        Collection<SSTableReader> sstables = writer.finish(true);
        assertNotNull(sstables);
        assertEquals(1, sstables.size());
        return sstables.iterator().next();
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) UpdateBuilder(org.apache.cassandra.UpdateBuilder) File(org.apache.cassandra.io.util.File) RangesAtEndpoint(org.apache.cassandra.locator.RangesAtEndpoint)
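
The writer appends directly to a new descriptor, so the returned SSTableReader is not yet known to the store. A hedged usage sketch (the row count is arbitrary; addSSTable is how such readers are registered with the ColumnFamilyStore in tests):

SSTableReader sstable = writeFile(cfs, 10);
// Register the freshly written sstable so reads and anti-compaction can see it.
cfs.addSSTable(sstable);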

Example 25 with UpdateBuilder

use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

the class SSTableRewriterTest method writeFiles.

public static Set<SSTableReader> writeFiles(ColumnFamilyStore cfs, int fileCount, int partitionCount, int cellCount) {
    int i = 0;
    Set<SSTableReader> result = new LinkedHashSet<>();
    for (int f = 0; f < fileCount; f++) {
        File dir = cfs.getDirectories().getDirectoryForNewSSTables();
        Descriptor desc = cfs.newSSTableDescriptor(dir);
        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, null, false, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS))) {
            // Split partitionCount roughly evenly across fileCount sstables; the last file takes any remainder.
            int end = f == fileCount - 1 ? partitionCount : ((f + 1) * partitionCount) / fileCount;
            for (; i < end; i++) {
                UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), ByteBufferUtil.bytes(i));
                for (int j = 0; j < cellCount; j++) builder.newRow(Integer.toString(i)).add("val", random(0, 1000));
                writer.append(builder.build().unfilteredIterator());
            }
            result.addAll(writer.finish(true));
        }
    }
    return result;
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) SerializationHeader(org.apache.cassandra.db.SerializationHeader) UpdateBuilder(org.apache.cassandra.UpdateBuilder) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) File(org.apache.cassandra.io.util.File)
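
A hedged usage sketch for the helper above (argument values are illustrative): spread 400 partitions of 10 cells each across four sstables, then register them with the store before exercising a rewriter:

Set<SSTableReader> sstables = writeFiles(cfs, 4, 400, 10);
assertEquals(4, sstables.size());
cfs.addSSTables(sstables);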

Aggregations

UpdateBuilder (org.apache.cassandra.UpdateBuilder) 25
Test (org.junit.Test) 20
ByteBuffer (java.nio.ByteBuffer) 12
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader) 12
Random (java.util.Random) 6
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder) 4
TableMetadata (org.apache.cassandra.schema.TableMetadata) 3
ArrayList (java.util.ArrayList) 2
Range (org.apache.cassandra.dht.Range) 2
Token (org.apache.cassandra.dht.Token) 2
ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException) 2
ISSTableScanner (org.apache.cassandra.io.sstable.ISSTableScanner) 2
File (org.apache.cassandra.io.util.File) 2
ImmutableMap (com.google.common.collect.ImmutableMap) 1
Iterators (com.google.common.collect.Iterators) 1
java.util (java.util) 1
Collection (java.util.Collection) 1
LinkedList (java.util.LinkedList) 1
List (java.util.List) 1
UUID (java.util.UUID) 1