Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class SSTableRewriterTest, method testAllKeysReadable.
@Test
public void testAllKeysReadable() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int i = 0; i < 100; i++) {
        String key = Integer.toString(i);
        for (int j = 0; j < 10; j++) {
            new RowUpdateBuilder(cfs.metadata(), 100, key)
                .clustering(Integer.toString(j))
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .apply();
        }
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    validateKeys(keyspace);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader s = cfs.getLiveSSTables().iterator().next();
    Set<SSTableReader> compacting = new HashSet<>();
    compacting.add(s);
    int keyCount = 0;
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
         CompactionController controller = new CompactionController(cfs, compacting, 0);
         LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
         SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1, false);
         CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION,
                                                        Collections.singletonList(scanner),
                                                        controller,
                                                        FBUtilities.nowInSeconds(),
                                                        UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (keyCount % 10 == 0) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
            keyCount++;
            validateKeys(keyspace);
        }
        rewriter.finish();
    }
    validateKeys(keyspace);
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    truncate(cfs);
}
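Every example on this page drives writes through the same fluent RowUpdateBuilder pattern, so it is worth isolating once. The sketch below is not from the Cassandra repository: the keyspace and table names ("ks", "tbl"), the key values, and the helper method name are illustrative, and it assumes a schema with one text clustering column and a blob column named "val", running inside a prepared test server like the tests here.

// Minimal sketch of the builder pattern used above (names are hypothetical).
static void writeOneRow() {
    ColumnFamilyStore cfs = Keyspace.open("ks").getColumnFamilyStore("tbl");
    new RowUpdateBuilder(cfs.metadata(), 0 /* timestamp */, "key0" /* partition key */)
        .clustering("c0")                              // clustering column value
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)  // regular column value
        .build()                                       // -> org.apache.cassandra.db.Mutation
        .apply();                                      // write it; applyUnsafe() skips the commit log
}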
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class IndexSummaryManagerTest, method testRebuildAtSamplingLevel.
@Test
public void testRebuildAtSamplingLevel() throws IOException {
    String ksname = KEYSPACE1;
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    int numRows = 256;
    for (int row = 0; row < numRows; row++) {
        String key = String.format("%3d", row);
        new RowUpdateBuilder(cfs.metadata(), 0, key)
            .clustering("column")
            .add("val", value)
            .build()
            .applyUnsafe();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    SSTableReader original = sstables.get(0);
    SSTableReader sstable = original;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        for (int samplingLevel = 1; samplingLevel < BASE_SAMPLING_LEVEL; samplingLevel++) {
            sstable = sstable.cloneWithNewSummarySamplingLevel(cfs, samplingLevel);
            assertEquals(samplingLevel, sstable.getIndexSummarySamplingLevel());
            int expectedSize = (numRows * samplingLevel) / (cfs.metadata().params.minIndexInterval * BASE_SAMPLING_LEVEL);
            assertEquals(expectedSize, sstable.getIndexSummarySize(), 1);
            txn.update(sstable, true);
            txn.checkpoint();
        }
        txn.finish();
    }
}
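The assertion inside the loop checks, to within one entry, that the index summary shrinks linearly with the sampling level. A worked instance of that formula, assuming minIndexInterval is 8 for this table and BASE_SAMPLING_LEVEL (from org.apache.cassandra.io.sstable.Downsampling) is 128; both values are assumptions here, not read from the test's schema:

int numRows = 256;
int minIndexInterval = 8;       // assumed value of cfs.metadata().params.minIndexInterval
int BASE_SAMPLING_LEVEL = 128;  // assumed value of Downsampling.BASE_SAMPLING_LEVEL
int samplingLevel = 64;         // half of the base level
int expectedSize = (numRows * samplingLevel) / (minIndexInterval * BASE_SAMPLING_LEVEL);
// 256 * 64 / (8 * 128) = 16: half of the full summary of 256 / 8 = 32 entries.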
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class IndexSummaryManagerTest, method createSSTables.
private void createSSTables(String ksname, String cfname, int numSSTables, int numPartition) {
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    List<Future<?>> futures = new ArrayList<>(numSSTables);
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    for (int sstable = 0; sstable < numSSTables; sstable++) {
        for (int p = 0; p < numPartition; p++) {
            String key = String.format("%3d", p);
            new RowUpdateBuilder(cfs.metadata(), 0, key)
                .clustering("column")
                .add("val", value)
                .build()
                .applyUnsafe();
        }
        futures.add(cfs.forceFlush());
    }
    for (Future<?> future : futures) {
        try {
            future.get();
        } catch (InterruptedException | ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
    assertEquals(numSSTables, cfs.getLiveSSTables().size());
    validateData(cfs, numPartition);
}
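The await loop above wraps both exception types in RuntimeException but silently swallows the thread's interrupt status. A hedged alternative (the helper name awaitAll is mine, not Cassandra's) that restores the flag before rethrowing:

static void awaitAll(Iterable<? extends Future<?>> futures) {
    for (Future<?> f : futures) {
        try {
            f.get(); // block until this flush completes
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt for callers
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause()); // unwrap the flush failure
        }
    }
}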
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class CommitLogSegmentManagerTest, method testCompressedCommitLogBackpressure.
@Test
@BMRules(rules = {
    @BMRule(name = "Acquire Semaphore before sync",
            targetClass = "AbstractCommitLogService$1",
            targetMethod = "run",
            targetLocation = "AT INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync",
            action = "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerTest.allowSync.acquire()"),
    @BMRule(name = "Release Semaphore after sync",
            targetClass = "AbstractCommitLogService$1",
            targetMethod = "run",
            targetLocation = "AFTER INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync",
            action = "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerTest.allowSync.release()") })
public void testCompressedCommitLogBackpressure() throws Throwable {
    // Perform all initialization before making CommitLog.sync blocking.
    // Doing the initialization within the method guarantees that Byteman has performed
    // its injections before we start.
    new Random().nextBytes(entropy);
    DatabaseDescriptor.daemonInitialization();
    DatabaseDescriptor.setCommitLogCompression(new ParameterizedClass("LZ4Compressor", ImmutableMap.of()));
    DatabaseDescriptor.setCommitLogSegmentSize(1);
    DatabaseDescriptor.setCommitLogSync(CommitLogSync.periodic);
    DatabaseDescriptor.setCommitLogSyncPeriod(10 * 1000);
    DatabaseDescriptor.setCommitLogMaxCompressionBuffersPerPool(3);
    SchemaLoader.prepareServer();
    SchemaLoader.createKeyspace(KEYSPACE1,
                                KeyspaceParams.simple(1),
                                SchemaLoader.standardCFMD(KEYSPACE1, STANDARD1, 0, AsciiType.instance, BytesType.instance),
                                SchemaLoader.standardCFMD(KEYSPACE1, STANDARD2, 0, AsciiType.instance, BytesType.instance));
    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore cfs1 = Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
    final Mutation m = new RowUpdateBuilder(cfs1.metadata(), 0, "k")
                       .clustering("bytes")
                       .add("val", ByteBuffer.wrap(entropy))
                       .build();
    Thread dummyThread = new Thread(() -> {
        for (int i = 0; i < 20; i++)
            CommitLog.instance.add(m);
    });
    try {
        // Make sure any call to CommitLog.sync blocks.
        allowSync.acquire();
        dummyThread.start();
        AbstractCommitLogSegmentManager clsm = CommitLog.instance.segmentManager;
        Util.spinAssertEquals(3, () -> clsm.getActiveSegments().size(), 5);
        Thread.sleep(1000);
        // Should only be able to create 3 segments, not 7, because allocation blocks
        // waiting for truncation that never comes.
        Assert.assertEquals(3, clsm.getActiveSegments().size());
        // Discard the currently active segments so allocation can continue.
        // Take a snapshot of the list; otherwise this would also discard newly allocated segments.
        new ArrayList<>(clsm.getActiveSegments()).forEach(clsm::archiveAndDiscard);
        // The allocated count should reach the limit again.
        Util.spinAssertEquals(3, () -> clsm.getActiveSegments().size(), 5);
    } finally {
        // Allow CommitLog.sync to proceed normally again.
        allowSync.release();
    }
    try {
        // Wait for the dummy thread to die.
        dummyThread.join();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
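The two Byteman rules gate CommitLog.sync on the test's allowSync semaphore: an acquire is injected just before the sync call and a release just after it. Stripped of Byteman, the injected pattern amounts to the sketch below, where gatedSync and doSync are illustrative names rather than real CommitLog methods:

final Semaphore allowSync = new Semaphore(1); // java.util.concurrent.Semaphore

void gatedSync(Runnable doSync) {
    allowSync.acquireUninterruptibly(); // "AT INVOKE ... CommitLog.sync"
    try {
        doSync.run();                   // the actual sync would happen here
    } finally {
        allowSync.release();            // "AFTER INVOKE ... CommitLog.sync"
    }
}
// Test side: allowSync.acquire() stalls every later sync at the acquire above;
// allowSync.release() in the test's finally block lets them proceed again.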
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
The class CommitLogSegmentManagerCDCTest, method testCLSMCDCDiscardLogic.
@Test
public void testCLSMCDCDiscardLogic() throws Throwable {
    CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC) CommitLog.instance.segmentManager;
    createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=false;");
    for (int i = 0; i < 8; i++) {
        // Size each row at a quarter segment so roughly 3 fit in a segment.
        new RowUpdateBuilder(currentTableMetadata(), 0, i)
            .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 4))
            .build()
            .apply();
    }
    // Should have 4 CDC segments since we haven't flushed yet: 3 PERMITTED (one of them active)
    // and 1 PERMITTED in waiting.
    Assert.assertEquals(4 * DatabaseDescriptor.getCommitLogSegmentSize(), cdcMgr.updateCDCTotalSize());
    expectCurrentCDCState(CDCState.PERMITTED);
    CommitLog.instance.forceRecycleAllSegments();
    // On flush, these PERMITTED segments should be deleted.
    Assert.assertEquals(0, new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length);
    createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
    for (int i = 0; i < 8; i++) {
        new RowUpdateBuilder(currentTableMetadata(), 0, i)
            .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 4))
            .build()
            .apply();
    }
    // 4 segments total again: 3 CONTAINS and 1 PERMITTED in waiting.
    Assert.assertEquals(4 * DatabaseDescriptor.getCommitLogSegmentSize(), cdcMgr.updateCDCTotalSize());
    CommitLog.instance.forceRecycleAllSegments();
    expectCurrentCDCState(CDCState.PERMITTED);
    // On flush, PERMITTED segments are deleted and CONTAINS segments are preserved.
    cdcMgr.awaitManagementTasksCompletion();
    int seen = getCDCRawCount();
    Assert.assertTrue("Expected >= 3 files in cdc_raw, saw: " + seen, seen >= 3);
}
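The repeated "4 segments" assertion follows from the buffer sizing in the write loops: each mutation carries a quarter segment of data, and per the test's own comment about three such rows fit in a segment once headers are accounted for. A back-of-envelope sketch of that accounting (the 3-per-segment figure is taken from the comment, not measured):

int segmentSize = DatabaseDescriptor.getCommitLogSegmentSize();
int rowsWritten = 8;
int rowsPerSegment = 3; // "fit 3 in a segment", per the comment above
int fullSegments = (rowsWritten + rowsPerSegment - 1) / rowsPerSegment; // = 3
int tracked = (fullSegments + 1) * segmentSize; // +1 for the pre-allocated segment in waiting
// tracked == 4 * segmentSize, which is what updateCDCTotalSize() reports both times.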