Example 86 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.

From class CleanupHelper, method readData.

/* usually used to populate the cache */
protected void readData(String keyspace, String columnFamily, int offset, int numberOfRows) throws IOException {
    ColumnFamilyStore store = Table.open(keyspace).getColumnFamilyStore(columnFamily);
    for (int i = offset; i < offset + numberOfRows; i++) {
        DecoratedKey key = Util.dk("key" + i);
        QueryPath path = new QueryPath(columnFamily, null, ByteBufferUtil.bytes("col" + i));
        // read back a single column for each row so the read path pulls it into the cache
        store.getColumnFamily(key, path, ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1);
    }
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore)
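
A minimal usage sketch of readData, assuming a test subclass of CleanupHelper and a schema containing the keyspace and column family named below; "Keyspace1" and "Standard1" are illustrative names, not taken from the source, and the matching rows and columns are assumed to have been inserted beforehand.

// Hypothetical caller of readData; keyspace and column family names are assumptions.
protected void warmRowCache() throws IOException {
    // read back the first 100 rows so their columns end up in the cache
    readData("Keyspace1", "Standard1", 0, 100);
}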

Example 87 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.

From class CompactionsPurgeTest, method testMinorCompactionPurge.

@Test
public void testMinorCompactionPurge() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE2);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    RowMutation rm;
    for (int k = 1; k <= 2; ++k) {
        DecoratedKey key = Util.dk("key" + k);
        // inserts
        rm = new RowMutation(TABLE2, key.key);
        for (int i = 0; i < 10; i++) {
            rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
        }
        rm.apply();
        cfs.forceBlockingFlush();
        // deletes
        for (int i = 0; i < 10; i++) {
            rm = new RowMutation(TABLE2, key.key);
            rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), 1);
            rm.apply();
        }
        cfs.forceBlockingFlush();
    }
    DecoratedKey key1 = Util.dk("key1");
    DecoratedKey key2 = Util.dk("key2");
    // flush, remember the current sstables and then resurrect one column
    // for the first key. Then submit a minor compaction on the remembered sstables.
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstablesIncomplete = cfs.getSSTables();
    rm = new RowMutation(TABLE2, key1.key);
    rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(5))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
    rm.apply();
    cfs.forceBlockingFlush();
    // compact only the remembered sstables; gcBefore = Integer.MAX_VALUE makes every tombstone old enough to purge
    new CompactionTask(cfs, sstablesIncomplete, Integer.MAX_VALUE).execute(null);
    // verify that minor compaction does not GC when key is present
    // in a non-compacted sstable
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key1, new QueryPath(cfName)));
    assert cf.getColumnCount() == 10;
    // verify that minor compaction does GC when key is provably not
    // present in a non-compacted sstable
    cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key2, new QueryPath(cfName)));
    assert cf == null;
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) SSTableReader(org.apache.cassandra.io.sstable.SSTableReader) Table(org.apache.cassandra.db.Table) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) RowMutation(org.apache.cassandra.db.RowMutation) ColumnFamily(org.apache.cassandra.db.ColumnFamily) Test(org.junit.Test)
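
TABLE1 and TABLE2 are constants declared on CompactionsPurgeTest rather than inside the test methods; a sketch of plausible declarations follows, where the concrete keyspace names are assumptions (only the identifiers themselves appear in these examples).

// Hypothetical keyspace-name constants; the actual values in the test class may differ.
public static final String TABLE1 = "Keyspace1";
public static final String TABLE2 = "Keyspace2";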

Example 88 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.

From class CompactionsPurgeTest, method testMajorCompactionPurge.

@Test
public void testMajorCompactionPurge() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE1);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key1");
    RowMutation rm;
    // inserts
    rm = new RowMutation(TABLE1, key.key);
    for (int i = 0; i < 10; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    rm.apply();
    cfs.forceBlockingFlush();
    // deletes
    for (int i = 0; i < 10; i++) {
        rm = new RowMutation(TABLE1, key.key);
        rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), 1);
        rm.apply();
    }
    cfs.forceBlockingFlush();
    // resurrect one column
    rm = new RowMutation(TABLE1, key.key);
    rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(5))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2);
    rm.apply();
    cfs.forceBlockingFlush();
    // major compact and test that all columns but the resurrected one are completely gone
    CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE).get();
    cfs.invalidateCachedRow(key);
    ColumnFamily cf = cfs.getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assertColumns(cf, "5");
    assert cf.getColumn(ByteBufferUtil.bytes(String.valueOf(5))) != null;
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) Table(org.apache.cassandra.db.Table) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) RowMutation(org.apache.cassandra.db.RowMutation) ColumnFamily(org.apache.cassandra.db.ColumnFamily) Test(org.junit.Test)
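
assertColumns is a test helper defined outside this method; a rough sketch of the check it presumably performs (that the column family contains exactly the named columns) is shown below, and the real helper may differ in its details.

// Hypothetical sketch of the assertColumns helper used above.
private static void assertColumns(ColumnFamily cf, String... names) {
    assert cf != null : "expected a column family, got null";
    assert cf.getColumnCount() == names.length : "expected " + names.length + " columns, found " + cf.getColumnCount();
    for (String name : names)
        assert cf.getColumn(ByteBufferUtil.bytes(name)) != null : "missing column " + name;
}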

Example 89 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project eiger by wlloyd.

From class CompactionsPurgeTest, method testCompactionPurgeOneFile.

@Test
public void testCompactionPurgeOneFile() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE1);
    String cfName = "Standard2";
    ColumnFamilyStore cfs = table.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key1");
    RowMutation rm;
    // inserts
    rm = new RowMutation(TABLE1, key.key);
    for (int i = 0; i < 5; i++) {
        rm.add(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    }
    rm.apply();
    // deletes
    for (int i = 0; i < 5; i++) {
        rm = new RowMutation(TABLE1, key.key);
        rm.delete(new QueryPath(cfName, null, ByteBufferUtil.bytes(String.valueOf(i))), 1);
        rm.apply();
    }
    cfs.forceBlockingFlush();
    // inserts & deletes were in the same memtable -> only deletes in sstable
    assert cfs.getSSTables().size() == 1 : cfs.getSSTables();
    // compact and test that the row is completely gone
    Util.compactAll(cfs).get();
    assert cfs.getSSTables().isEmpty();
    ColumnFamily cf = table.getColumnFamilyStore(cfName).getColumnFamily(QueryFilter.getIdentityFilter(key, new QueryPath(cfName)));
    assert cf == null : cf;
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) Table(org.apache.cassandra.db.Table) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) RowMutation(org.apache.cassandra.db.RowMutation) ColumnFamily(org.apache.cassandra.db.ColumnFamily) Test(org.junit.Test)
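
Util.compactAll is another test helper that is not shown here; a minimal sketch of what it could look like, assuming it simply forces a maximal compaction with a gcBefore that makes every tombstone purgeable (the same submitMaximal call used in Example 88).

// Hypothetical sketch of Util.compactAll; the real helper may enumerate sstables differently.
// Requires java.util.concurrent.Future.
public static Future<?> compactAll(ColumnFamilyStore cfs) {
    return CompactionManager.instance.submitMaximal(cfs, Integer.MAX_VALUE);
}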

Example 90 with ColumnFamilyStore

Use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.

From class CommitLogSegmentManagerTest, method testCompressedCommitLogBackpressure.

@Test
@BMRules(rules = { @BMRule(name = "Acquire Semaphore before sync", targetClass = "AbstractCommitLogService$1", targetMethod = "run", targetLocation = "AT INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync", action = "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerTest.allowSync.acquire()"), @BMRule(name = "Release Semaphore after sync", targetClass = "AbstractCommitLogService$1", targetMethod = "run", targetLocation = "AFTER INVOKE org.apache.cassandra.db.commitlog.CommitLog.sync", action = "org.apache.cassandra.db.commitlog.CommitLogSegmentManagerTest.allowSync.release()") })
public void testCompressedCommitLogBackpressure() throws Throwable {
    // Perform all initialization before making CommitLog.sync blocking.
    // Doing the initialization within the method guarantees that Byteman has performed its injections before we start.
    new Random().nextBytes(entropy);
    DatabaseDescriptor.daemonInitialization();
    DatabaseDescriptor.setCommitLogCompression(new ParameterizedClass("LZ4Compressor", ImmutableMap.of()));
    DatabaseDescriptor.setCommitLogSegmentSize(1);
    DatabaseDescriptor.setCommitLogSync(CommitLogSync.periodic);
    DatabaseDescriptor.setCommitLogSyncPeriod(10 * 1000);
    DatabaseDescriptor.setCommitLogMaxCompressionBuffersPerPool(3);
    SchemaLoader.prepareServer();
    SchemaLoader.createKeyspace(KEYSPACE1, KeyspaceParams.simple(1), SchemaLoader.standardCFMD(KEYSPACE1, STANDARD1, 0, AsciiType.instance, BytesType.instance), SchemaLoader.standardCFMD(KEYSPACE1, STANDARD2, 0, AsciiType.instance, BytesType.instance));
    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore cfs1 = Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
    final Mutation m = new RowUpdateBuilder(cfs1.metadata(), 0, "k").clustering("bytes").add("val", ByteBuffer.wrap(entropy)).build();
    Thread dummyThread = new Thread(() -> {
        for (int i = 0; i < 20; i++) CommitLog.instance.add(m);
    });
    try {
        // Makes sure any call to CommitLog.sync is blocking
        allowSync.acquire();
        dummyThread.start();
        AbstractCommitLogSegmentManager clsm = CommitLog.instance.segmentManager;
        Util.spinAssertEquals(3, () -> clsm.getActiveSegments().size(), 5);
        Thread.sleep(1000);
        // Should only be able to create 3 segments, not 7, because it blocks waiting for a truncation that never comes
        Assert.assertEquals(3, clsm.getActiveSegments().size());
        // Discard the currently active segments so allocation can continue.
        // Take snapshot of the list, otherwise this will also discard newly allocated segments.
        new ArrayList<>(clsm.getActiveSegments()).forEach(clsm::archiveAndDiscard);
        // The allocated count should reach the limit again.
        Util.spinAssertEquals(3, () -> clsm.getActiveSegments().size(), 5);
    } finally {
        // Allow the CommitLog.sync to perform normally.
        allowSync.release();
    }
    try {
        // Wait for the dummy thread to die
        dummyThread.join();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
Also used : Random(java.util.Random) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) ParameterizedClass(org.apache.cassandra.config.ParameterizedClass) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Mutation(org.apache.cassandra.db.Mutation) Test(org.junit.Test) BMRules(org.jboss.byteman.contrib.bmunit.BMRules)
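
The method above references several fields declared elsewhere in CommitLogSegmentManagerTest; the Byteman rules gate the sync loop on allowSync. A sketch of plausible declarations follows, where the string values and the buffer size are assumptions rather than values taken from the source.

// Hypothetical field declarations; only the identifiers are known from the code above.
// Requires: import java.util.concurrent.Semaphore;
static final Semaphore allowSync = new Semaphore(1);        // acquired/released by the Byteman rules around CommitLog.sync
private static final String KEYSPACE1 = "CommitLogTest";    // assumed keyspace name
private static final String STANDARD1 = "Standard1";        // assumed table names
private static final String STANDARD2 = "Standard2";
private static final byte[] entropy = new byte[1024 * 256]; // mutation payload; size is an assumption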

Aggregations

ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 175
Test (org.junit.Test): 110
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 90
Keyspace (org.apache.cassandra.db.Keyspace): 64
File (java.io.File): 30
Directories (org.apache.cassandra.db.Directories): 25
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 24
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 22
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 20
AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest): 20
RandomAccessFile (java.io.RandomAccessFile): 19
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 14
ArrayList (java.util.ArrayList): 13
ByteBuffer (java.nio.ByteBuffer): 12
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 12
Range (org.apache.cassandra.dht.Range): 11
Table (org.apache.cassandra.db.Table): 9
Token (org.apache.cassandra.dht.Token): 9
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 9
StatsMetadata (org.apache.cassandra.io.sstable.metadata.StatsMetadata): 9