Example 16 with UpdateBuilder

Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

From class LeveledCompactionStrategyTest, method testGrouperLevels.

/**
     * Ensure that the grouping operation preserves the levels of grouped tables
     */
@Test
public void testGrouperLevels() throws Exception {
    // 100 KB value, make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
    // Need entropy to defeat compression, so the on-disk size is predictable whether compression is enabled or disabled
    new Random().nextBytes(value.array());
    // Enough data to have a level 1 and 2
    int rows = 40;
    int columns = 20;
    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++) {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
        for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }
    waitForLeveling(cfs);
    CompactionStrategyManager strategyManager = cfs.getCompactionStrategyManager();
    // Checking we're not completely bad at math
    int l1Count = strategyManager.getSSTableCountPerLevel()[1];
    int l2Count = strategyManager.getSSTableCountPerLevel()[2];
    if (l1Count == 0 || l2Count == 0) {
        logger.error("L1 or L2 has 0 sstables. Expected > 0 on both. L1: {}, L2: {}", l1Count, l2Count);
        Assert.fail();
    }
    Collection<Collection<SSTableReader>> groupedSSTables = cfs.getCompactionStrategyManager().groupSSTablesForAntiCompaction(cfs.getLiveSSTables());
    for (Collection<SSTableReader> sstableGroup : groupedSSTables) {
        int groupLevel = -1;
        for (SSTableReader sstable : sstableGroup) {
            int tableLevel = sstable.getSSTableLevel();
            if (groupLevel == -1)
                groupLevel = tableLevel;
            // every sstable in an anti-compaction group must come from the same level
            assert groupLevel == tableLevel;
        }
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Random(java.util.Random) Collection(java.util.Collection) UpdateBuilder(org.apache.cassandra.UpdateBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
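
The write loop in this test is the core UpdateBuilder idiom repeated throughout these examples. A minimal sketch of that idiom, assuming a ColumnFamilyStore fixture named cfs as in the tests above:

// Build one partition's worth of updates and apply it to the local store
UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), "partitionKey");
// newRow(...) starts a row with the given clustering value; add(...) sets a column
update.newRow("column0").add("val", ByteBufferUtil.bytes("value"));
// applyUnsafe() applies the mutation directly to the local store,
// skipping the normal write-path checks (test-only shortcut)
update.applyUnsafe();
// flush the memtable so the data lands in a new sstable on disk
cfs.forceBlockingFlush();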

Example 17 with UpdateBuilder

Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

From class LeveledCompactionStrategyTest, method testValidationMultipleSSTablePerLevel.

/*
     * This exercises in particular the code of #4142
     */
@Test
public void testValidationMultipleSSTablePerLevel() throws Exception {
    byte[] b = new byte[100 * 1024];
    new Random().nextBytes(b);
    // 100 KB value, make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(b);
    // Enough data to have a level 1 and 2
    int rows = 40;
    int columns = 20;
    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++) {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
        for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }
    waitForLeveling(cfs);
    CompactionStrategyManager strategyManager = cfs.getCompactionStrategyManager();
    // Checking we're not completely bad at math
    assertTrue(strategyManager.getSSTableCountPerLevel()[1] > 0);
    assertTrue(strategyManager.getSSTableCountPerLevel()[2] > 0);
    Range<Token> range = new Range<>(Util.token(""), Util.token(""));
    int gcBefore = keyspace.getColumnFamilyStore(CF_STANDARDDLEVELED).gcBefore(FBUtilities.nowInSeconds());
    UUID parentRepSession = UUID.randomUUID();
    ActiveRepairService.instance.registerParentRepairSession(parentRepSession, FBUtilities.getBroadcastAddress(), Arrays.asList(cfs), Arrays.asList(range), false, ActiveRepairService.UNREPAIRED_SSTABLE, true);
    RepairJobDesc desc = new RepairJobDesc(parentRepSession, UUID.randomUUID(), KEYSPACE1, CF_STANDARDDLEVELED, Arrays.asList(range));
    Validator validator = new Validator(desc, FBUtilities.getBroadcastAddress(), gcBefore);
    CompactionManager.instance.submitValidation(cfs, validator).get();
}
Also used : Random(java.util.Random) RepairJobDesc(org.apache.cassandra.repair.RepairJobDesc) UpdateBuilder(org.apache.cassandra.UpdateBuilder) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) UUID(java.util.UUID) ByteBuffer(java.nio.ByteBuffer) Validator(org.apache.cassandra.repair.Validator) Test(org.junit.Test)
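
One detail worth noting above: new Range<>(Util.token(""), Util.token("")) builds a range with equal endpoints, and in Cassandra's (left-exclusive, right-inclusive] Range semantics a range whose endpoints are equal wraps all the way around and covers the entire ring, so the validation scans all of the table's data:

// Equal endpoints denote the full ring in Cassandra's (left, right] Range semantics
Range<Token> fullRing = new Range<>(Util.token(""), Util.token(""));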

Example 18 with UpdateBuilder

Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

From class LeveledCompactionStrategyTest, method testTokenRangeCompaction.

@Test
public void testTokenRangeCompaction() throws Exception {
    // Remove any existing data so we can start out clean with predictable number of sstables
    cfs.truncateBlocking();
    // Disable auto compaction so Cassandra does not compact these sstables on its own
    CompactionManager.instance.disableAutoCompaction();
    // 100 KB value, make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
    DecoratedKey key1 = Util.dk(String.valueOf(1));
    DecoratedKey key2 = Util.dk(String.valueOf(2));
    List<DecoratedKey> keys = new ArrayList<>(Arrays.asList(key1, key2));
    int numIterations = 10;
    int columns = 2;
    // create 10 sstables that contain data for both key1 and key2
    for (int i = 0; i < numIterations; i++) {
        for (DecoratedKey key : keys) {
            UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
            for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
            update.applyUnsafe();
        }
        cfs.forceBlockingFlush();
    }
    // create 20 more sstables, with 10 containing data for key1 and the other 10 containing data for key2
    for (int i = 0; i < numIterations; i++) {
        for (DecoratedKey key : keys) {
            UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
            for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
            update.applyUnsafe();
            cfs.forceBlockingFlush();
        }
    }
    // We should have a total of 30 sstables by now
    assertEquals(30, cfs.getLiveSSTables().size());
    // Compact just the tables with key2
    // Bit hackish to use the key1.token as the prior key but works in BytesToken
    Range<Token> tokenRange = new Range<>(key1.getToken(), key2.getToken());
    Collection<Range<Token>> tokenRanges = new ArrayList<>(Arrays.asList(tokenRange));
    cfs.forceCompactionForTokenRange(tokenRanges);
    while (CompactionManager.instance.isCompacting(Arrays.asList(cfs))) {
        Thread.sleep(100);
    }
    // 20 tables that have key2 should have been compacted into 1 table, resulting in 11 (30 - 20 + 1)
    assertEquals(11, cfs.getLiveSSTables().size());
    // Compact just the tables with key1. At this point all 11 tables should have key1
    // wrapping range (key2.token, key1.token] covers key1 but excludes key2
    Range<Token> tokenRange2 = new Range<>(key2.getToken(), key1.getToken());
    Collection<Range<Token>> tokenRanges2 = new ArrayList<>(Arrays.asList(tokenRange2));
    cfs.forceCompactionForTokenRange(tokenRanges2);
    while (CompactionManager.instance.isCompacting(Arrays.asList(cfs))) {
        Thread.sleep(100);
    }
    // the 11 tables containing key1 should all compact to 1 table
    assertEquals(1, cfs.getLiveSSTables().size());
}
Also used : DecoratedKey(org.apache.cassandra.db.DecoratedKey) ArrayList(java.util.ArrayList) UpdateBuilder(org.apache.cassandra.UpdateBuilder) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
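
The polling loop that waits on CompactionManager appears twice in this test; a small hypothetical helper (not part of the original test) could factor it out, using only the calls the test already makes:

// Hypothetical helper: block until no compaction is running for the given store
private static void awaitCompactions(ColumnFamilyStore cfs) throws InterruptedException {
    while (CompactionManager.instance.isCompacting(Arrays.asList(cfs)))
        Thread.sleep(100);
}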

Example 19 with UpdateBuilder

Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

From class AntiCompactionTest, method writeFile.

private SSTableReader writeFile(ColumnFamilyStore cfs, int count) {
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor desc = cfs.newSSTableDescriptor(dir);
    try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, NO_PENDING_REPAIR, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS))) {
        for (int i = 0; i < count; i++) {
            UpdateBuilder builder = UpdateBuilder.create(metadata, ByteBufferUtil.bytes(i));
            for (int j = 0; j < count * 5; j++) builder.newRow("c" + j).add("val", "value1");
            writer.append(builder.build().unfilteredIterator());
        }
        Collection<SSTableReader> sstables = writer.finish(true);
        assertNotNull(sstables);
        assertEquals(1, sstables.size());
        return sstables.iterator().next();
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) UpdateBuilder(org.apache.cassandra.UpdateBuilder) File(java.io.File)
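
A usage sketch for this helper, assuming a cfs fixture as elsewhere in AntiCompactionTest (registering the returned reader via cfs.addSSTable is the usual follow-up in these tests):

// Write a file of partitions into a single sstable and register it with the store
SSTableReader sstable = writeFile(cfs, 1000);
cfs.addSSTable(sstable);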

Example 20 with UpdateBuilder

Use of org.apache.cassandra.UpdateBuilder in project cassandra by apache.

From class SSTableRewriterTest, method writeFiles.

public static Set<SSTableReader> writeFiles(ColumnFamilyStore cfs, int fileCount, int partitionCount, int cellCount, int cellSize) {
    int i = 0;
    Set<SSTableReader> result = new LinkedHashSet<>();
    for (int f = 0; f < fileCount; f++) {
        File dir = cfs.getDirectories().getDirectoryForNewSSTables();
        Descriptor desc = cfs.newSSTableDescriptor(dir);
        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, null, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS))) {
            int end = f == fileCount - 1 ? partitionCount : ((f + 1) * partitionCount) / fileCount;
            for (; i < end; i++) {
                UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), ByteBufferUtil.bytes(i));
                for (int j = 0; j < cellCount; j++) builder.newRow(Integer.toString(i)).add("val", random(0, 1000));
                writer.append(builder.build().unfilteredIterator());
            }
            result.addAll(writer.finish(true));
        }
    }
    return result;
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) SerializationHeader(org.apache.cassandra.db.SerializationHeader) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) UpdateBuilder(org.apache.cassandra.UpdateBuilder) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) File(java.io.File)
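
The end computation splits partitionCount partitions evenly across fileCount writers, with the last file absorbing any rounding remainder: for example, with fileCount = 3 and partitionCount = 10, the three files receive partitions [0, 3), [3, 6) and [6, 10).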

Aggregations

UpdateBuilder (org.apache.cassandra.UpdateBuilder) 20
Test (org.junit.Test) 17
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader) 11
ByteBuffer (java.nio.ByteBuffer) 10
Random (java.util.Random) 4
File (java.io.File) 2
ArrayList (java.util.ArrayList) 2
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder) 2
Range (org.apache.cassandra.dht.Range) 2
Token (org.apache.cassandra.dht.Token) 2
ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException) 2
ISSTableScanner (org.apache.cassandra.io.sstable.ISSTableScanner) 2
TableMetadata (org.apache.cassandra.schema.TableMetadata) 2
ImmutableMap (com.google.common.collect.ImmutableMap) 1
Iterators (com.google.common.collect.Iterators) 1
java.util (java.util) 1
Collection (java.util.Collection) 1
List (java.util.List) 1
UUID (java.util.UUID) 1
ExecutionException (java.util.concurrent.ExecutionException) 1