Example 16 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

Class SSTableRewriterTest, method testAllKeysReadable.

@Test
public void testAllKeysReadable() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    for (int i = 0; i < 100; i++) {
        String key = Integer.toString(i);
        for (int j = 0; j < 10; j++)
            new RowUpdateBuilder(cfs.metadata(), 100, key)
                .clustering(Integer.toString(j))
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .apply();
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    validateKeys(keyspace);
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader s = cfs.getLiveSSTables().iterator().next();
    Set<SSTableReader> compacting = new HashSet<>();
    compacting.add(s);
    int keyCount = 0;
    try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
        CompactionController controller = new CompactionController(cfs, compacting, 0);
        LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
        SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, 1, false);
        CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())) {
        rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
        while (ci.hasNext()) {
            rewriter.append(ci.next());
            if (keyCount % 10 == 0) {
                rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
            }
            keyCount++;
            validateKeys(keyspace);
        }
        rewriter.finish();
    }
    validateKeys(keyspace);
    LifecycleTransaction.waitForDeletions();
    validateCFS(cfs);
    truncate(cfs);
}
Also used : CompactionController(org.apache.cassandra.db.compaction.CompactionController) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) CompactionIterator(org.apache.cassandra.db.compaction.CompactionIterator) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
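
Every example on this page gets data on disk through the same fluent pattern: build a Mutation with RowUpdateBuilder against the table's metadata, then apply it. A minimal sketch of that pattern pulled out into a helper, assuming a table with one text clustering column and a blob column named "val" (the helper name and its parameters are illustrative, not taken from the Cassandra sources):

// Minimal sketch of the shared write pattern (illustrative helper, not project code).
// Assumes the target table has a text clustering column and a blob column named "val".
private static void writeRow(String keyspaceName, String tableName, String partitionKey, String clusteringValue) {
    ColumnFamilyStore cfs = Keyspace.open(keyspaceName).getColumnFamilyStore(tableName);
    new RowUpdateBuilder(cfs.metadata(), FBUtilities.timestampMicros(), partitionKey)
        .clustering(clusteringValue)
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)   // empty value, as in the test above
        .build()
        .apply();                                       // some tests use applyUnsafe() to skip the commit log
}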

Example 17 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

Class IndexSummaryManagerTest, method testRebuildAtSamplingLevel.

@Test
public void testRebuildAtSamplingLevel() throws IOException {
    String ksname = KEYSPACE1;
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    int numRows = 256;
    for (int row = 0; row < numRows; row++) {
        String key = String.format("%3d", row);
        new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
    List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    assertEquals(1, sstables.size());
    SSTableReader original = sstables.get(0);
    SSTableReader sstable = original;
    try (LifecycleTransaction txn = cfs.getTracker().tryModify(asList(sstable), OperationType.UNKNOWN)) {
        for (int samplingLevel = 1; samplingLevel < BASE_SAMPLING_LEVEL; samplingLevel++) {
            sstable = sstable.cloneWithNewSummarySamplingLevel(cfs, samplingLevel);
            assertEquals(samplingLevel, sstable.getIndexSummarySamplingLevel());
            int expectedSize = (numRows * samplingLevel) / (cfs.metadata().params.minIndexInterval * BASE_SAMPLING_LEVEL);
            assertEquals(expectedSize, sstable.getIndexSummarySize(), 1);
            txn.update(sstable, true);
            txn.checkpoint();
        }
        txn.finish();
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
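
The expectedSize assertion is just the downsampling arithmetic: a summary at samplingLevel keeps samplingLevel out of every BASE_SAMPLING_LEVEL entries of the full index summary, which itself holds roughly one entry per minIndexInterval partitions; the assertEquals above allows a delta of 1 for rounding. A quick worked check with assumed numbers (minIndexInterval = 8 is an illustration, not read from the test's schema; 128 is the usual BASE_SAMPLING_LEVEL):

// Worked check of the expected summary size; the concrete values are assumptions for illustration.
int numRows = 256;
int minIndexInterval = 8;    // assumed here; the test reads cfs.metadata().params.minIndexInterval
int baseSamplingLevel = 128; // Downsampling.BASE_SAMPLING_LEVEL
int samplingLevel = 64;
int fullSummarySize = numRows / minIndexInterval;                                       // 32 entries at full sampling
int expectedSize = (numRows * samplingLevel) / (minIndexInterval * baseSamplingLevel);  // 16 entries at level 64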

Example 18 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

Class IndexSummaryManagerTest, method createSSTables.

private void createSSTables(String ksname, String cfname, int numSSTables, int numPartition) {
    Keyspace keyspace = Keyspace.open(ksname);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    ArrayList<Future> futures = new ArrayList<>(numSSTables);
    ByteBuffer value = ByteBuffer.wrap(new byte[100]);
    for (int sstable = 0; sstable < numSSTables; sstable++) {
        for (int p = 0; p < numPartition; p++) {
            String key = String.format("%3d", p);
            new RowUpdateBuilder(cfs.metadata(), 0, key).clustering("column").add("val", value).build().applyUnsafe();
        }
        futures.add(cfs.forceFlush());
    }
    for (Future future : futures) {
        try {
            future.get();
        } catch (InterruptedException | ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
    assertEquals(numSSTables, cfs.getLiveSSTables().size());
    validateData(cfs, numPartition);
}
Also used : ByteBuffer(java.nio.ByteBuffer) CompactionInterruptedException(org.apache.cassandra.db.compaction.CompactionInterruptedException) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore)
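
The tail of this helper waits on the flush futures with a manual loop. Cassandra's FBUtilities also has a small utility for waiting on a collection of futures, so a roughly equivalent (untested here) ending could look like this:

// Roughly equivalent wait using FBUtilities.waitOnFutures instead of the manual loop above.
FBUtilities.waitOnFutures(futures);
assertEquals(numSSTables, cfs.getLiveSSTables().size());
validateData(cfs, numPartition);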

Example 19 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

Class CommitLogSegmentManagerTest, method testCompressedCommitLogBackpressure.

@Test
@BMRules(rules = {
    @BMRule(name = "Acquire Semaphore before sync",
            targetClass = "AbstractCommitLogService$1",
            targetMethod = "run",
            targetLocation = "AT INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync",
            action = "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerTest.allowSync.acquire()"),
    @BMRule(name = "Release Semaphore after sync",
            targetClass = "AbstractCommitLogService$1",
            targetMethod = "run",
            targetLocation = "AFTER INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync",
            action = "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerTest.allowSync.release()") })
public void testCompressedCommitLogBackpressure() throws Throwable {
    // Perform all initialization before making CommitLog.Sync blocking
    // Doing the initialization within the method guarantees that Byteman has performed its injections when we start
    new Random().nextBytes(entropy);
    DatabaseDescriptor.daemonInitialization();
    DatabaseDescriptor.setCommitLogCompression(new ParameterizedClass("LZ4Compressor", ImmutableMap.of()));
    DatabaseDescriptor.setCommitLogSegmentSize(1);
    DatabaseDescriptor.setCommitLogSync(CommitLogSync.periodic);
    DatabaseDescriptor.setCommitLogSyncPeriod(10 * 1000);
    DatabaseDescriptor.setCommitLogMaxCompressionBuffersPerPool(3);
    SchemaLoader.prepareServer();
    SchemaLoader.createKeyspace(KEYSPACE1, KeyspaceParams.simple(1), SchemaLoader.standardCFMD(KEYSPACE1, STANDARD1, 0, AsciiType.instance, BytesType.instance), SchemaLoader.standardCFMD(KEYSPACE1, STANDARD2, 0, AsciiType.instance, BytesType.instance));
    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore cfs1 = Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
    final Mutation m = new RowUpdateBuilder(cfs1.metadata(), 0, "k").clustering("bytes").add("val", ByteBuffer.wrap(entropy)).build();
    Thread dummyThread = new Thread(() -> {
        for (int i = 0; i < 20; i++) CommitLog.instance.add(m);
    });
    try {
        // Makes sure any call to CommitLog.sync is blocking
        allowSync.acquire();
        dummyThread.start();
        AbstractCommitLogSegmentManager clsm = CommitLog.instance.segmentManager;
        Util.spinAssertEquals(3, () -> clsm.getActiveSegments().size(), 5);
        Thread.sleep(1000);
        // Should only be able to create 3 segments not 7 because it blocks waiting for truncation that never comes
        Assert.assertEquals(3, clsm.getActiveSegments().size());
        // Discard the currently active segments so allocation can continue.
        // Take snapshot of the list, otherwise this will also discard newly allocated segments.
        new ArrayList<>(clsm.getActiveSegments()).forEach(clsm::archiveAndDiscard);
        // The allocated count should reach the limit again.
        Util.spinAssertEquals(3, () -> clsm.getActiveSegments().size(), 5);
    } finally {
        // Allow the CommitLog.sync to perform normally.
        allowSync.release();
    }
    try {
        // Wait for the dummy thread to die
        dummyThread.join();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
Also used : Random(java.util.Random) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) ParameterizedClass(org.apache.cassandra.config.ParameterizedClass) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Mutation(org.apache.cassandra.db.Mutation) Test(org.junit.Test) BMRules(org.jboss.byteman.contrib.bmunit.BMRules)
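
The two @BMRule entries wrap every CommitLog.sync call in an acquire/release pair on the test's allowSync semaphore, so the test thread can stall syncing simply by holding the permit. Stripped of Byteman, the gating idea looks roughly like this (a sketch of the mechanism only, not code from the project):

// Sketch of what the Byteman rules inject around CommitLog.sync (java.util.concurrent.Semaphore).
// While the test thread holds the only permit, every sync attempt blocks here, so segments are
// never recycled and allocation backpressure kicks in.
static final Semaphore allowSync = new Semaphore(1);

void gatedSync() throws InterruptedException {
    allowSync.acquire();      // injected "AT INVOKE CommitLog.sync"
    // CommitLog.sync(...) runs here
    allowSync.release();      // injected "AFTER INVOKE CommitLog.sync"
}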

Example 20 with RowUpdateBuilder

Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.

Class CommitLogSegmentManagerCDCTest, method testCLSMCDCDiscardLogic.

@Test
public void testCLSMCDCDiscardLogic() throws Throwable {
    CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC) CommitLog.instance.segmentManager;
    createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=false;");
    for (int i = 0; i < 8; i++) {
        // each value is a quarter of a segment, so roughly 3 rows fit in a segment
        new RowUpdateBuilder(currentTableMetadata(), 0, i)
            .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 4))
            .build()
            .apply();
    }
    // Should have 4 CDC segments since we haven't flushed yet: 3 PERMITTED (one of which is active) and 1 PERMITTED in waiting
    Assert.assertEquals(4 * DatabaseDescriptor.getCommitLogSegmentSize(), cdcMgr.updateCDCTotalSize());
    expectCurrentCDCState(CDCState.PERMITTED);
    CommitLog.instance.forceRecycleAllSegments();
    // On flush, these PERMITTED segments should be deleted
    Assert.assertEquals(0, new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length);
    createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
    for (int i = 0; i < 8; i++) {
        new RowUpdateBuilder(currentTableMetadata(), 0, i).add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 4)).build().apply();
    }
    // 4 total again, 3 CONTAINS, 1 in waiting PERMITTED
    Assert.assertEquals(4 * DatabaseDescriptor.getCommitLogSegmentSize(), cdcMgr.updateCDCTotalSize());
    CommitLog.instance.forceRecycleAllSegments();
    expectCurrentCDCState(CDCState.PERMITTED);
    // On flush, PERMITTED is deleted, CONTAINS is preserved.
    cdcMgr.awaitManagementTasksCompletion();
    int seen = getCDCRawCount();
    Assert.assertTrue("Expected >3 files in cdc_raw, saw: " + seen, seen >= 3);
}
Also used : RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) File(java.io.File) Test(org.junit.Test)
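
The 4 * getCommitLogSegmentSize() expectation follows from the write sizes: each value is a quarter of a segment, but per-row overhead means only about three of them fit per segment, so eight writes land in three segments, with one more segment kept in waiting. A rough back-of-the-envelope check (the rows-per-segment figure comes from the comment in the loop above, not from measurement):

// Rough arithmetic behind the "4 segments" assertion; rowsPerSegment is taken from the comment above.
int segmentSize = DatabaseDescriptor.getCommitLogSegmentSize();
int valueSize = segmentSize / 4;                                      // size of each "data" value written
int rowsPerSegment = 3;                                               // segmentSize / valueSize is 4, but overhead leaves room for ~3
int segmentsHoldingRows = (8 + rowsPerSegment - 1) / rowsPerSegment;  // 8 writes -> 3 segments containing data
int expectedSegments = segmentsHoldingRows + 1;                       // plus the spare segment in waiting -> 4
// matches Assert.assertEquals(4 * segmentSize, cdcMgr.updateCDCTotalSize())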

Aggregations

RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 34
Test (org.junit.Test): 22
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 19
Keyspace (org.apache.cassandra.db.Keyspace): 17
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 14
ByteBuffer (java.nio.ByteBuffer): 12
TableMetadata (org.apache.cassandra.schema.TableMetadata): 12
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 10
Mutation (org.apache.cassandra.db.Mutation): 9
ArrayList (java.util.ArrayList): 4
UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet): 4
File (java.io.File): 3
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 3
UUID (java.util.UUID): 2
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 2
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 2
Row (org.apache.cassandra.db.rows.Row): 2
WriteTimeoutException (org.apache.cassandra.exceptions.WriteTimeoutException): 2
DataInputBuffer (org.apache.cassandra.io.util.DataInputBuffer): 2
DataInputPlus (org.apache.cassandra.io.util.DataInputPlus): 2