
Example 6 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

The class CommitLogSegmentManagerCDCTest, method testCDCWriteTimeout:

@Test
public void testCDCWriteTimeout() throws Throwable {
    createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
    CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC) CommitLog.instance.segmentManager;
    TableMetadata cfm = currentTableMetadata();
    // Confirm that the logic checking whether we can allocate new CDC segments works
    Integer originalCDCSize = DatabaseDescriptor.getCDCSpaceInMB();
    try {
        DatabaseDescriptor.setCDCSpaceInMB(32);
        // Spin until we hit CDC capacity and make sure we get a WriteTimeout
        try {
            // Should trigger on anything < 20:1 compression ratio during compressed test
            for (int i = 0; i < 100; i++) {
                new RowUpdateBuilder(cfm, 0, i).add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3)).build().apply();
            }
            Assert.fail("Expected WriteTimeoutException from full CDC but did not receive it.");
        } catch (WriteTimeoutException e) {
            // expected, do nothing
        }
        expectCurrentCDCState(CDCState.FORBIDDEN);
        // Confirm we can create a non-cdc table and write to it even while at cdc capacity
        createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=false;");
        execute("INSERT INTO %s (idx, data) VALUES (1, '1');");
        // Confirm that, on flush+recycle, we see files show up in cdc_raw
        Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush();
        CommitLog.instance.forceRecycleAllSegments();
        cdcMgr.awaitManagementTasksCompletion();
        Assert.assertTrue("Expected files to be moved to overflow.", getCDCRawCount() > 0);
        // Simulate a CDC consumer reading files then deleting them
        for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) FileUtils.deleteWithConfirm(f);
        // Update size tracker to reflect deleted files. Should flip flag on current allocatingFrom to allow.
        cdcMgr.updateCDCTotalSize();
        expectCurrentCDCState(CDCState.PERMITTED);
    } finally {
        DatabaseDescriptor.setCDCSpaceInMB(originalCDCSize);
    }
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), WriteTimeoutException (org.apache.cassandra.exceptions.WriteTimeoutException), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), File (java.io.File), Test (org.junit.Test)
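
The examples in this listing all follow the same fluent pattern: construct a RowUpdateBuilder from table metadata, a timestamp, and a partition key, chain column values onto it, then build a Mutation and apply it. A minimal sketch of that pattern as it might appear in a CQLTester-based test; the schema and values here are illustrative, not taken from the example above:

// Minimal, hypothetical illustration of the core RowUpdateBuilder flow.
createTable("CREATE TABLE %s (idx int PRIMARY KEY, data text)");
TableMetadata metadata = currentTableMetadata();
new RowUpdateBuilder(metadata, FBUtilities.timestampMicros(), 1) // metadata, timestamp, partition key
    .add("data", "hello") // set a regular column
    .build()              // wrap the resulting update in a Mutation
    .apply();             // write through the normal write path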

Example 7 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

The class DateTieredCompactionStrategyTest, method testPrepBucket:

@Test
public void testPrepBucket() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());
    newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", newBucket.isEmpty());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());
    cfs.truncateBlocking();
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Keyspace (org.apache.cassandra.db.Keyspace), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
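
The two assertions above encode the acceptance rule being tested: the newest ("incoming") bucket must reach the min compaction threshold before it is compacted, while older buckets qualify once they hold at least two sstables. A simplified restatement of that rule as a standalone predicate; this is an illustration, not the actual DateTieredCompactionStrategy code:

// Hypothetical simplification of the bucket-acceptance rule the assertions describe;
// the real newestBucket() logic is considerably more involved.
static boolean bucketAccepted(int bucketSize, boolean incoming, int minThreshold) {
    return incoming ? bucketSize >= minThreshold // the newest bucket waits for minThreshold sstables
                    : bucketSize >= 2;           // older buckets need only a pair
}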

Example 8 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

The class DateTieredCompactionStrategyTest, method testDropExpiredSSTables:

@Test
public void testDropExpiredSSTables() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 2 sstables
    DecoratedKey key = Util.dk("expired");
    // timestamp = now, ttl = 1 second: the row in this sstable expires almost immediately
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), 1, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
    cfs.forceBlockingFlush();
    SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
    Thread.sleep(10);
    key = Util.dk("nonexpired");
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
    cfs.forceBlockingFlush();
    assertEquals(cfs.getLiveSSTables().size(), 2);
    Map<String, String> options = new HashMap<>();
    options.put(DateTieredCompactionStrategyOptions.BASE_TIME_KEY, "30");
    options.put(DateTieredCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
    // 1d / (24 * 60 * 60) days is one second, so the expired sstable exceeds the max age almost at once
    options.put(DateTieredCompactionStrategyOptions.MAX_SSTABLE_AGE_KEY, Double.toString((1d / (24 * 60 * 60))));
    options.put(DateTieredCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");
    DateTieredCompactionStrategy dtcs = new DateTieredCompactionStrategy(cfs, options);
    for (SSTableReader sstable : cfs.getLiveSSTables()) dtcs.addSSTable(sstable);
    dtcs.startup();
    assertNull(dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000)));
    Thread.sleep(2000);
    AbstractCompactionTask t = dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000));
    assertNotNull(t);
    assertEquals(1, Iterables.size(t.transaction.originals()));
    SSTableReader sstable = t.transaction.originals().iterator().next();
    assertEquals(sstable, expiredSSTable);
    t.transaction.abort();
    cfs.truncateBlocking();
}
Also used: SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Keyspace (org.apache.cassandra.db.Keyspace), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
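
The MAX_SSTABLE_AGE_KEY value above, 1d / (24 * 60 * 60) days, is exactly one second, which is what lets the TTL'd sstable age out of the DTCS window almost immediately. Outside of a test, these options would normally be supplied through the table's compaction settings; a rough sketch of equivalent CQL, assuming the commonly documented DTCS option names and an invented keyspace/table:

// Hypothetical CQL mirroring the options map built above; 'ks.dtcs_demo' and the
// literal option names are assumptions, not taken from the test.
String ddl = "CREATE TABLE ks.dtcs_demo (k int PRIMARY KEY, v text) WITH compaction = {"
           + " 'class': 'DateTieredCompactionStrategy',"
           + " 'base_time_seconds': '30',"
           + " 'timestamp_resolution': 'MILLISECONDS',"
           + " 'max_sstable_age_days': '" + (1d / (24 * 60 * 60)) + "' }";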

Example 9 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

The class SizeTieredCompactionStrategyTest, method testPrepBucket:

@Test
public void testPrepBucket() throws Exception {
    String ksname = KEYSPACE1;
    String cfname = "Standard1";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        String key = String.valueOf(r);
        new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    Pair<List<SSTableReader>, Double> bucket;
    List<SSTableReader> interestingBucket = mostInterestingBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32);
    assertTrue("nothing should be returned when all buckets are below the min threshold", interestingBucket.isEmpty());
    sstrs.get(0).overrideReadMeter(new RestorableMeter(100.0, 100.0));
    sstrs.get(1).overrideReadMeter(new RestorableMeter(200.0, 200.0));
    sstrs.get(2).overrideReadMeter(new RestorableMeter(300.0, 300.0));
    long estimatedKeys = sstrs.get(0).estimatedKeys();
    // if we have more than the max threshold, the coldest should be dropped
    bucket = trimToThresholdWithHotness(sstrs, 2);
    assertEquals("one bucket should have been dropped", 2, bucket.left.size());
    double expectedBucketHotness = (200.0 + 300.0) / estimatedKeys;
    assertEquals(String.format("bucket hotness (%f) should be close to %f", bucket.right, expectedBucketHotness), expectedBucketHotness, bucket.right, 1.0);
}
Also used: ArrayList (java.util.ArrayList), ByteBuffer (java.nio.ByteBuffer), RestorableMeter (org.apache.cassandra.metrics.RestorableMeter), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), List (java.util.List), Test (org.junit.Test)
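
The final assertion reads more easily with the hotness arithmetic spelled out: an sstable's hotness is approximately its read rate divided by its estimated key count, and a bucket's hotness sums its members. A worked sketch with invented numbers (the test divides by the real estimatedKeys of the first sstable):

// Worked example of the expected bucket hotness, using invented numbers.
double estimatedKeys = 1.0;                        // illustrative stand-in for sstrs.get(0).estimatedKeys()
double coldest = 100.0 / estimatedKeys;            // trimmed away by trimToThresholdWithHotness(sstrs, 2)
double expected = (200.0 + 300.0) / estimatedKeys; // hotness of the surviving two-sstable bucket
// here expected == 500.0; the assertion above allows a tolerance of 1.0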

Example 10 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

The class PartitionUpdateTest, method testOperationCountWithCompactTable:

@Test
public void testOperationCountWithCompactTable() {
    createTable("CREATE TABLE %s (key text PRIMARY KEY, a int) WITH COMPACT STORAGE");
    TableMetadata cfm = currentTableMetadata();
    PartitionUpdate update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").add("a", 1).buildUpdate();
    Assert.assertEquals(1, update.operationCount());
    update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").buildUpdate();
    Assert.assertEquals(0, update.operationCount());
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder), PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate), Test (org.junit.Test)
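
Unlike the earlier examples, this test calls buildUpdate() rather than build(): buildUpdate() returns the raw PartitionUpdate, convenient for inspecting operationCount() without writing anything, whereas build() wraps the same update in a Mutation that can be applied. A minimal sketch of the distinction, with illustrative values:

// buildUpdate() yields the PartitionUpdate itself; build() yields an applicable Mutation.
RowUpdateBuilder builder = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").add("a", 1);
PartitionUpdate update = builder.buildUpdate(); // inspect without applying
// Mutation mutation = builder.build();         // or wrap it in a Mutation and mutation.apply()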

Aggregations

RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 34
Test (org.junit.Test): 22
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 19
Keyspace (org.apache.cassandra.db.Keyspace): 17
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 14
ByteBuffer (java.nio.ByteBuffer): 12
TableMetadata (org.apache.cassandra.schema.TableMetadata): 12
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 10
Mutation (org.apache.cassandra.db.Mutation): 9
ArrayList (java.util.ArrayList): 4
UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet): 4
File (java.io.File): 3
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 3
UUID (java.util.UUID): 2
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 2
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 2
Row (org.apache.cassandra.db.rows.Row): 2
WriteTimeoutException (org.apache.cassandra.exceptions.WriteTimeoutException): 2
DataInputBuffer (org.apache.cassandra.io.util.DataInputBuffer): 2
DataInputPlus (org.apache.cassandra.io.util.DataInputPlus): 2