Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class CommitLogSegmentManagerCDCTest, method testCDCWriteTimeout.
@Test
public void testCDCWriteTimeout() throws Throwable {
    createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
    CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC) CommitLog.instance.segmentManager;
    TableMetadata cfm = currentTableMetadata();
    // Confirm that the logic checking whether we can allocate new CDC segments works
    Integer originalCDCSize = DatabaseDescriptor.getCDCSpaceInMB();
    try {
        DatabaseDescriptor.setCDCSpaceInMB(32);
        // Spin until we hit CDC capacity and make sure we get a WriteTimeout
        try {
            // Should trigger on anything < 20:1 compression ratio during the compressed test
            for (int i = 0; i < 100; i++) {
                new RowUpdateBuilder(cfm, 0, i)
                    .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
                    .build()
                    .apply();
            }
            Assert.fail("Expected WriteTimeoutException from full CDC but did not receive it.");
        } catch (WriteTimeoutException e) {
            // expected, do nothing
        }
        expectCurrentCDCState(CDCState.FORBIDDEN);
        // Confirm we can create a non-cdc table and write to it even while at cdc capacity
        createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=false;");
        execute("INSERT INTO %s (idx, data) VALUES (1, '1');");
        // Confirm that, on flush+recycle, we see files show up in cdc_raw
        Keyspace.open(keyspace()).getColumnFamilyStore(currentTable()).forceBlockingFlush();
        CommitLog.instance.forceRecycleAllSegments();
        cdcMgr.awaitManagementTasksCompletion();
        Assert.assertTrue("Expected files to be moved to overflow.", getCDCRawCount() > 0);
        // Simulate a CDC consumer reading files then deleting them
        for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles())
            FileUtils.deleteWithConfirm(f);
        // Update the size tracker to reflect the deleted files; this should flip the
        // flag on the current allocatingFrom segment and allow CDC writes again.
        cdcMgr.updateCDCTotalSize();
        expectCurrentCDCState(CDCState.PERMITTED);
    } finally {
        DatabaseDescriptor.setCDCSpaceInMB(originalCDCSize);
    }
}
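The tail of the test doubles as a recipe for unblocking CDC writes once cdc_raw fills up. A minimal consumer-side sketch, assuming the same internal APIs the test uses; processSegment is a hypothetical placeholder for real consumer logic, not a Cassandra API:

// Drain cdc_raw the way the test simulates a consumer doing.
File cdcRaw = new File(DatabaseDescriptor.getCDCLogLocation());
for (File segment : cdcRaw.listFiles()) {
    processSegment(segment); // hypothetical hook: consume the segment's CDC data
    FileUtils.deleteWithConfirm(segment);
}
// Recompute the tracked CDC size so the segment manager flips back to PERMITTED.
((CommitLogSegmentManagerCDC) CommitLog.instance.segmentManager).updateCDCTotalSize();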
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class DateTieredCompactionStrategyTest, method testPrepBucket.
@Test
public void testPrepBucket() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        new RowUpdateBuilder(cfs.metadata(), r, key.getKey())
            .clustering("column")
            .add("val", value)
            .build()
            .applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());
    newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
    assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", newBucket.isEmpty());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
    assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());
    cfs.truncateBlocking();
}
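The two newestBucket calls differ only in the now argument. A hedged annotation of the parameters, inferred from the assertion messages above rather than from the method's declaration:

List<SSTableReader> bucket = newestBucket(
        Collections.singletonList(sstrs.subList(0, 2)), // one candidate bucket holding 2 sstables
        4,              // min threshold, enforced on the newest ("incoming") window
        32,             // max threshold
        9,              // now: 9 < baseTime makes the bucket "incoming", so it needs >= 4 sstables
        10,             // baseTime: passing now == 10 instead makes the bucket non-incoming, and 2 sstables suffice
        Long.MAX_VALUE, // max window size: effectively unbounded here
        new SizeTieredCompactionStrategyOptions());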
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class DateTieredCompactionStrategyTest, method testDropExpiredSSTables.
@Test
public void testDropExpiredSSTables() throws InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 2 sstables: one whose only row carries a 1-second TTL, one that never expires
    DecoratedKey key = Util.dk(String.valueOf("expired"));
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), 1, key.getKey())
        .clustering("column")
        .add("val", value)
        .build()
        .applyUnsafe();
    cfs.forceBlockingFlush();
    SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
    Thread.sleep(10);
    key = Util.dk(String.valueOf("nonexpired"));
    new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), key.getKey())
        .clustering("column")
        .add("val", value)
        .build()
        .applyUnsafe();
    cfs.forceBlockingFlush();
    assertEquals(cfs.getLiveSSTables().size(), 2);
    Map<String, String> options = new HashMap<>();
    options.put(DateTieredCompactionStrategyOptions.BASE_TIME_KEY, "30");
    options.put(DateTieredCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
    options.put(DateTieredCompactionStrategyOptions.MAX_SSTABLE_AGE_KEY, Double.toString(1d / (24 * 60 * 60)));
    options.put(DateTieredCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");
    DateTieredCompactionStrategy dtcs = new DateTieredCompactionStrategy(cfs, options);
    for (SSTableReader sstable : cfs.getLiveSSTables())
        dtcs.addSSTable(sstable);
    dtcs.startup();
    assertNull(dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000)));
    Thread.sleep(2000);
    AbstractCompactionTask t = dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000));
    assertNotNull(t);
    assertEquals(1, Iterables.size(t.transaction.originals()));
    SSTableReader sstable = t.transaction.originals().iterator().next();
    assertEquals(sstable, expiredSSTable);
    t.transaction.abort();
    cfs.truncateBlocking();
}
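The option map is what makes the expiration observable within a two-second sleep. The same four puts, annotated with my reading of the DTCS option semantics (the values themselves are unchanged from the test):

Map<String, String> options = new HashMap<>();
// base window for the tiering logic, in seconds
options.put(DateTieredCompactionStrategyOptions.BASE_TIME_KEY, "30");
// interpret write timestamps as milliseconds (the test writes with System.currentTimeMillis())
options.put(DateTieredCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
// max sstable age is expressed in days: 1d / 86400 is one second, so the TTL'd
// sstable ages out of normal compaction almost immediately
options.put(DateTieredCompactionStrategyOptions.MAX_SSTABLE_AGE_KEY, Double.toString(1d / (24 * 60 * 60)));
// run the fully-expired-sstable check on every getNextBackgroundTask call
options.put(DateTieredCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");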
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class SizeTieredCompactionStrategyTest, method testPrepBucket.
@Test
public void testPrepBucket() throws Exception {
    String ksname = KEYSPACE1;
    String cfname = "Standard1";
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    // create 3 sstables
    int numSSTables = 3;
    for (int r = 0; r < numSSTables; r++) {
        String key = String.valueOf(r);
        new RowUpdateBuilder(cfs.metadata(), 0, key)
            .clustering("column")
            .add("val", value)
            .build()
            .applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
    Pair<List<SSTableReader>, Double> bucket;
    List<SSTableReader> interestingBucket = mostInterestingBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32);
    assertTrue("nothing should be returned when all buckets are below the min threshold", interestingBucket.isEmpty());
    sstrs.get(0).overrideReadMeter(new RestorableMeter(100.0, 100.0));
    sstrs.get(1).overrideReadMeter(new RestorableMeter(200.0, 200.0));
    sstrs.get(2).overrideReadMeter(new RestorableMeter(300.0, 300.0));
    long estimatedKeys = sstrs.get(0).estimatedKeys();
    // if we have more than the max threshold, the coldest should be dropped
    bucket = trimToThresholdWithHotness(sstrs, 2);
    assertEquals("one bucket should have been dropped", 2, bucket.left.size());
    double expectedBucketHotness = (200.0 + 300.0) / estimatedKeys;
    assertEquals(String.format("bucket hotness (%f) should be close to %f", bucket.right, expectedBucketHotness), expectedBucketHotness, bucket.right, 1.0);
}
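The expected hotness figure can be reproduced by hand. A sketch of the arithmetic, assuming (as SizeTieredCompactionStrategy's hotness helper does) that an sstable's hotness is its two-hour read rate divided by its estimated key count; the second RestorableMeter constructor argument above is that two-hour rate:

// Bucket hotness is the sum of per-sstable hotness over the members kept after trimming.
double hotness = 0.0;
for (SSTableReader sstable : bucket.left)
    hotness += sstable.getReadMeter().twoHourRate() / sstable.estimatedKeys();
// The coldest sstable (rate 100) was trimmed, leaving (200.0 + 300.0) / estimatedKeys.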
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class PartitionUpdateTest, method testOperationCountWithCompactTable.
@Test
public void testOperationCountWithCompactTable() {
    createTable("CREATE TABLE %s (key text PRIMARY KEY, a int) WITH COMPACT STORAGE");
    TableMetadata cfm = currentTableMetadata();
    PartitionUpdate update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0")
                             .add("a", 1)
                             .buildUpdate();
    Assert.assertEquals(1, update.operationCount());
    update = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), "key0").buildUpdate();
    Assert.assertEquals(0, update.operationCount());
}
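The same pattern extends to a non-compact table with a clustering column, where each clustered row added still counts as a single operation. A minimal sketch under the same CQLTester-style assumptions (createTable, currentTableMetadata):

createTable("CREATE TABLE %s (k text, c text, v int, PRIMARY KEY (k, c))");
TableMetadata metadata = currentTableMetadata();
PartitionUpdate update = new RowUpdateBuilder(metadata, FBUtilities.timestampMicros(), "k0")
                         .clustering("c0") // one clustered row...
                         .add("v", 7)
                         .buildUpdate();
Assert.assertEquals(1, update.operationCount()); // ...should count as one operation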