
Example 1 with ImmutableBTreePartition

Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.

From the class BatchlogManagerTest, method testDelete.

@Test
public void testDelete() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    TableMetadata cfm = cfs.metadata();
    new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234")).clustering("c").add("val", "val" + 1234).build().applyUnsafe();
    DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("1234"));
    ImmutableBTreePartition results = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
    Iterator<Row> iter = results.iterator();
    assert iter.hasNext();
    Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(cfm, dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds()));
    mutation.applyUnsafe();
    Util.assertEmpty(Util.cmd(cfs, dk).build());
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Row (org.apache.cassandra.db.rows.Row), Mutation (org.apache.cassandra.db.Mutation), ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition)
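
The same read path recurs throughout these examples: build a single-partition read command with Util.cmd(cfs, ...).build(), materialize it with Util.getOnlyPartitionUnfiltered, and inspect the resulting ImmutableBTreePartition. Below is a minimal sketch of that pattern reusing only the APIs shown in these tests; the key, value, and test-method name are hypothetical placeholders, and it assumes the same test scaffolding (keyspace KEYSPACE1, table CF_STANDARD1) and imports as testDelete above.

@Test
public void testReadPartitionSketch() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    // write a single row (hypothetical key and value)
    new RowUpdateBuilder(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes("sketch-key")).clustering("c").add("val", "v").build().applyUnsafe();
    // read the whole partition back, unfiltered (tombstones included)
    DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("sketch-key"));
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
    // the partition snapshot exposes the row count, per-row iteration, and the partition-level deletion
    assertEquals(1, partition.rowCount());
    assertTrue(partition.partitionLevelDeletion().isLive());
    for (Row row : partition) assertTrue(row.hasLiveData(FBUtilities.nowInSeconds()));
}

The unfiltered variant matters for the compaction tests below, where deleted rows must still be observable as tombstones.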

Example 2 with ImmutableBTreePartition

Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.

From the class CompactionsPurgeTest, method testMajorCompactionPurgeTopLevelTombstoneWithMaxTimestamp.

@Test
public void testMajorCompactionPurgeTopLevelTombstoneWithMaxTimestamp() {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    String key = "key1";
    // inserts
    for (int i = 0; i < 10; i++) {
        RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 0, key);
        builder.clustering(String.valueOf(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
    new Mutation(KEYSPACE1, dk(key)).add(PartitionUpdate.fullPartitionDelete(cfs.metadata(), dk(key), Long.MAX_VALUE, FBUtilities.nowInSeconds())).applyUnsafe();
    cfs.forceBlockingFlush();
    // major compact - tombstones should be purged
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE, false));
    // resurrect one column
    RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 2, key);
    builder.clustering(String.valueOf(5)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    cfs.forceBlockingFlush();
    cfs.invalidateCachedPartition(dk(key));
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    assertEquals(1, partition.rowCount());
}
Also used: ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition), Test (org.junit.Test)
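
Two ways of issuing a partition-level tombstone appear in these tests: Example 1 constructs the Mutation directly from the PartitionUpdate, while this example adds the update to a keyspace-scoped Mutation. A short sketch of both forms, using a hypothetical key and assuming the same KEYSPACE1/Standard1 test setup:

ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("sketch-key"));
// form used in Example 1: the Mutation is constructed from the PartitionUpdate itself
new Mutation(PartitionUpdate.fullPartitionDelete(cfs.metadata(), dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds())).applyUnsafe();
// form used above: a keyspace-scoped Mutation with the full-partition delete added to it
new Mutation(KEYSPACE1, dk).add(PartitionUpdate.fullPartitionDelete(cfs.metadata(), dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds())).applyUnsafe();

Either way, a subsequent unfiltered read of the partition reports a non-live partitionLevelDeletion(), as checked in Example 4.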

Example 3 with ImmutableBTreePartition

Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.

From the class CompactionsPurgeTest, method testMinTimestampPurge.

/**
 * Verify that we don't drop tombstones during a minor compaction that might still be relevant.
 */
@Test
public void testMinTimestampPurge() {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE2);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    String key3 = "key3";
    // inserts
    new RowUpdateBuilder(cfs.metadata(), 8, key3).clustering("c1").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), 8, key3).clustering("c2").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    cfs.forceBlockingFlush();
    // delete c1
    RowUpdateBuilder.deleteRow(cfs.metadata(), 10, key3, "c1").applyUnsafe();
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstablesIncomplete = cfs.getLiveSSTables();
    // delete c2 so we have a new delete in a different SSTable
    RowUpdateBuilder.deleteRow(cfs.metadata(), 9, key3, "c2").applyUnsafe();
    cfs.forceBlockingFlush();
    // compact the sstables with the c1/c2 data and the c1 tombstone
    List<AbstractCompactionTask> tasks = cfs.getCompactionStrategyManager().getUserDefinedTasks(sstablesIncomplete, Integer.MAX_VALUE);
    assertEquals(1, tasks.size());
    tasks.get(0).execute(null);
    // We should have both the c1 and c2 tombstones still. Since the min timestamp in the c2 tombstone
    // sstable is older than the c1 tombstone, it is invalid to throw out the c1 tombstone.
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key3).build());
    assertEquals(2, partition.rowCount());
    for (Row row : partition) assertFalse(row.hasLiveData(FBUtilities.nowInSeconds()));
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), Row (org.apache.cassandra.db.rows.Row), ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition), Test (org.junit.Test)
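
For contrast with the partition-level deletes above, the row-level tombstone created by RowUpdateBuilder.deleteRow can be observed the same way: the unfiltered read still returns the row, but it carries no live data. A minimal sketch under the same assumptions (KEYSPACE2/Standard1 test setup; the key and test-method name are hypothetical):

@Test
public void testRowTombstoneSketch() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE2).getColumnFamilyStore("Standard1");
    String key = "sketch-key";
    // a live row, then a newer row-level tombstone on the same clustering
    new RowUpdateBuilder(cfs.metadata(), 8, key).clustering("c1").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    RowUpdateBuilder.deleteRow(cfs.metadata(), 10, key, "c1").applyUnsafe();
    // the deleted row is still returned by the unfiltered read, but has no live cells
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    assertEquals(1, partition.rowCount());
    for (Row row : partition) assertFalse(row.hasLiveData(FBUtilities.nowInSeconds()));
}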

Example 4 with ImmutableBTreePartition

Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.

From the class CompactionsPurgeTest, method testCompactionPurgeTombstonedRow.

@Test
public void testCompactionPurgeTombstonedRow() throws ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    String keyspaceName = KEYSPACE1;
    String cfName = "Standard1";
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    String key = "key3";
    // inserts
    for (int i = 0; i < 10; i++) {
        RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), i, key);
        builder.clustering(String.valueOf(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    // delete the partition with a timestamp such that not all columns are deleted
    Mutation rm = new Mutation(KEYSPACE1, dk(key));
    rm.add(PartitionUpdate.fullPartitionDelete(cfs.metadata(), dk(key), 4, FBUtilities.nowInSeconds()));
    rm.applyUnsafe();
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    assertFalse(partition.partitionLevelDeletion().isLive());
    // flush and major compact (with tombstone purging)
    cfs.forceBlockingFlush();
    Util.compactAll(cfs, Integer.MAX_VALUE).get();
    assertFalse(Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build()).isEmpty());
    // re-inserts with timestamp lower than delete
    for (int i = 0; i < 5; i++) {
        RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), i, key);
        builder.clustering(String.valueOf(i)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    }
    // Check that the re-inserts are visible: the partition tombstone (timestamp 4) was purged by the major
    // compaction above, so the rows re-inserted with lower timestamps are not shadowed and all 10 rows come back
    partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    assertEquals(10, partition.rowCount());
}
Also used: ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition), Test (org.junit.Test)

Example 5 with ImmutableBTreePartition

Use of org.apache.cassandra.db.partitions.ImmutableBTreePartition in project cassandra by apache.

From the class DynamicCompositeTypeTest, method testFullRoundReversed.

@Test
public void testFullRoundReversed() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARDDYNCOMPOSITE);
    ByteBuffer cname1 = createDynamicCompositeKey("test1", null, -1, false, true);
    ByteBuffer cname2 = createDynamicCompositeKey("test1", uuids[0], 24, false, true);
    ByteBuffer cname3 = createDynamicCompositeKey("test1", uuids[0], 42, false, true);
    ByteBuffer cname4 = createDynamicCompositeKey("test2", uuids[0], -1, false, true);
    ByteBuffer cname5 = createDynamicCompositeKey("test2", uuids[1], 42, false, true);
    ByteBuffer key = ByteBufferUtil.bytes("kr");
    long ts = FBUtilities.timestampMicros();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname5).add("val", "cname5").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname1).add("val", "cname1").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname4).add("val", "cname4").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname2).add("val", "cname2").build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), ts, key).clustering(cname3).add("val", "cname3").build().applyUnsafe();
    ColumnMetadata cdef = cfs.metadata().getColumn(ByteBufferUtil.bytes("val"));
    ImmutableBTreePartition readPartition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    Iterator<Row> iter = readPartition.iterator();
    compareValues(iter.next().getCell(cdef), "cname5");
    compareValues(iter.next().getCell(cdef), "cname4");
    // null UUID < reversed value
    compareValues(iter.next().getCell(cdef), "cname1");
    compareValues(iter.next().getCell(cdef), "cname3");
    compareValues(iter.next().getCell(cdef), "cname2");
}
Also used: ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), Row (org.apache.cassandra.db.rows.Row), ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)

Aggregations

ImmutableBTreePartition (org.apache.cassandra.db.partitions.ImmutableBTreePartition): 13 uses
Test (org.junit.Test): 11
Row (org.apache.cassandra.db.rows.Row): 5
ByteBuffer (java.nio.ByteBuffer): 4
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 3
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 2
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 2
File (java.io.File): 1
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 1
Mutation (org.apache.cassandra.db.Mutation): 1
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 1
UnfilteredSerializer (org.apache.cassandra.db.rows.UnfilteredSerializer): 1
IndexInfo (org.apache.cassandra.io.sstable.IndexInfo): 1
TableMetadata (org.apache.cassandra.schema.TableMetadata): 1
LongSerializer (org.apache.cassandra.serializers.LongSerializer): 1