
Example 66 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

the class LegacySSTableTest method streamLegacyTable.

private void streamLegacyTable(String tablePattern, String legacyVersion, String compactNameSuffix) throws Exception {
    String table = String.format(tablePattern, legacyVersion, compactNameSuffix);
    SSTableReader sstable = SSTableReader.open(getDescriptor(legacyVersion, table));
    IPartitioner p = sstable.getPartitioner();
    // Cover the full token space with two ranges that split at the token of key "100".
    List<Range<Token>> ranges = new ArrayList<>();
    ranges.add(new Range<>(p.getMinimumToken(), p.getToken(ByteBufferUtil.bytes("100"))));
    ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("100")), p.getMinimumToken()));
    // Stream the matching sections of the legacy sstable back to this node.
    ArrayList<StreamSession.SSTableStreamingSections> details = new ArrayList<>();
    details.add(new StreamSession.SSTableStreamingSections(sstable.ref(),
                                                           sstable.getPositionsForRanges(ranges),
                                                           sstable.estimatedKeysForRanges(ranges),
                                                           sstable.getSSTableMetadata().repairedAt));
    new StreamPlan("LegacyStreamingTest").transferFiles(FBUtilities.getBroadcastAddress(), details).execute().get();
}
Also used : StreamPlan(org.apache.cassandra.streaming.StreamPlan) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) StreamSession(org.apache.cassandra.streaming.StreamSession) ArrayList(java.util.ArrayList) Range(org.apache.cassandra.dht.Range) IPartitioner(org.apache.cassandra.dht.IPartitioner)
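
The helper above streams a single legacy table. A minimal sketch of how it might be driven across legacy versions and compact-name suffixes; the table patterns, suffixes, and the method name here are assumptions for illustration, not taken from LegacySSTableTest itself:

private void streamLegacyTables(String legacyVersion) throws Exception {
    // Hypothetical driver: the patterns and suffixes below are illustrative only.
    for (String compactSuffix : new String[]{ "", "_compact" }) {
        streamLegacyTable("legacy_%s_simple%s", legacyVersion, compactSuffix);
        streamLegacyTable("legacy_%s_clust%s", legacyVersion, compactSuffix);
    }
}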

Example 67 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

the class SSTableMetadataTest method testTrackMaxDeletionTime.

@Test
public void testTrackMaxDeletionTime() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
    long timestamp = System.currentTimeMillis();
    // 10 partitions x 10 rows, each row expiring with a small TTL (10..19 seconds).
    for (int i = 0; i < 10; i++) {
        DecoratedKey key = Util.dk(Integer.toString(i));
        for (int j = 0; j < 10; j++)
            new RowUpdateBuilder(store.metadata(), timestamp, 10 + j, Integer.toString(i))
                .clustering(Integer.toString(j))
                .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                .build()
                .applyUnsafe();
    }
    // One row with a much longer TTL (10000s); it should dominate the flushed sstable's maxLocalDeletionTime.
    new RowUpdateBuilder(store.metadata(), timestamp, 10000, "longttl")
        .clustering("col")
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    store.forceBlockingFlush();
    assertEquals(1, store.getLiveSSTables().size());
    int ttltimestamp = (int) (System.currentTimeMillis() / 1000);
    int firstDelTime = 0;
    for (SSTableReader sstable : store.getLiveSSTables()) {
        firstDelTime = sstable.getSSTableMetadata().maxLocalDeletionTime;
        assertEquals(ttltimestamp + 10000, firstDelTime, 10);
    }
    // A second, longer-lived row; the second flush creates an sstable with a later max deletion time.
    new RowUpdateBuilder(store.metadata(), timestamp, 20000, "longttl2")
        .clustering("col")
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    ttltimestamp = (int) (System.currentTimeMillis() / 1000);
    store.forceBlockingFlush();
    assertEquals(2, store.getLiveSSTables().size());
    List<SSTableReader> sstables = new ArrayList<>(store.getLiveSSTables());
    if (sstables.get(0).getSSTableMetadata().maxLocalDeletionTime < sstables.get(1).getSSTableMetadata().maxLocalDeletionTime) {
        assertEquals(sstables.get(0).getSSTableMetadata().maxLocalDeletionTime, firstDelTime);
        assertEquals(sstables.get(1).getSSTableMetadata().maxLocalDeletionTime, ttltimestamp + 20000, 10);
    } else {
        assertEquals(sstables.get(1).getSSTableMetadata().maxLocalDeletionTime, firstDelTime);
        assertEquals(sstables.get(0).getSSTableMetadata().maxLocalDeletionTime, ttltimestamp + 20000, 10);
    }
    Util.compact(store, store.getLiveSSTables());
    assertEquals(1, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertEquals(sstable.getSSTableMetadata().maxLocalDeletionTime, ttltimestamp + 20000, 10);
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) ArrayList(java.util.ArrayList) Test(org.junit.Test)
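
The assertions above rely on how expiring cells record their deletion time: a cell written with TTL t at time now gets a local deletion time of roughly nowInSeconds + t, and the sstable metadata tracks the maximum over all cells it contains. A short sketch of that arithmetic (not part of the test; the 10-second slack mirrors the delta used in the assertions):

// Sketch only: expected sstable-level maximum for the data written above.
int nowInSeconds = (int) (System.currentTimeMillis() / 1000);
int smallTtlMax = nowInSeconds + 19;     // largest of the TTLs 10..19 on the bulk rows
int longTtlMax  = nowInSeconds + 10000;  // the "longttl" row
int expectedMaxLocalDeletionTime = Math.max(smallTtlMax, longTtlMax);
// The test compares sstable.getSSTableMetadata().maxLocalDeletionTime against this, within ~10 seconds.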

Example 68 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

the class SSTableMetadataTest method testWithDeletes.

/**
     * 1. create a row with columns with ttls, 5x100 and 1x1000
     * 2. flush, verify (maxLocalDeletionTime = time+1000)
     * 3. delete column with ttl=1000
     * 4. flush, verify the new sstable (maxLocalDeletionTime = ~now)
     * 5. compact
     * 6. verify resulting sstable has maxLocalDeletionTime = time + 100.
     *
     * @throws ExecutionException
     * @throws InterruptedException
     */
@Test
public void testWithDeletes() throws ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
    long timestamp = System.currentTimeMillis();
    DecoratedKey key = Util.dk("deletetest");
    // Five cells with TTL=100 and one cell ("todelete") with TTL=1000.
    for (int i = 0; i < 5; i++)
        new RowUpdateBuilder(store.metadata(), timestamp, 100, "deletetest")
            .clustering("deletecolumn" + i)
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    new RowUpdateBuilder(store.metadata(), timestamp, 1000, "deletetest")
        .clustering("todelete")
        .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    store.forceBlockingFlush();
    assertEquals(1, store.getLiveSSTables().size());
    int ttltimestamp = (int) (System.currentTimeMillis() / 1000);
    int firstMaxDelTime = 0;
    for (SSTableReader sstable : store.getLiveSSTables()) {
        firstMaxDelTime = sstable.getSSTableMetadata().maxLocalDeletionTime;
        assertEquals(ttltimestamp + 1000, firstMaxDelTime, 10);
    }
    RowUpdateBuilder.deleteRow(store.metadata(), timestamp + 1, "deletetest", "todelete").applyUnsafe();
    store.forceBlockingFlush();
    assertEquals(2, store.getLiveSSTables().size());
    boolean foundDelete = false;
    for (SSTableReader sstable : store.getLiveSSTables()) {
        if (sstable.getSSTableMetadata().maxLocalDeletionTime != firstMaxDelTime) {
            assertEquals(sstable.getSSTableMetadata().maxLocalDeletionTime, ttltimestamp, 10);
            foundDelete = true;
        }
    }
    assertTrue(foundDelete);
    Util.compact(store, store.getLiveSSTables());
    assertEquals(1, store.getLiveSSTables().size());
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertEquals(ttltimestamp + 100, sstable.getSSTableMetadata().maxLocalDeletionTime, 10);
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
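
The distinguishing factor between the two flushed sstables here is the row tombstone: a deletion written at time now carries a local deletion time of roughly nowInSeconds, with no TTL added, which is why the second sstable's maximum sits near ttltimestamp rather than ttltimestamp + 1000. A sketch of the three values the assertions distinguish (an assumption-labelled illustration, not from the test):

// Sketch only: deletion times behind the three assertion groups above.
int nowInSeconds = (int) (System.currentTimeMillis() / 1000);
int ttlSstableMax       = nowInSeconds + 1000; // first flush: dominated by the TTL=1000 cell
int tombstoneSstableMax = nowInSeconds;        // second flush: the row deletion itself
int afterCompactionMax  = nowInSeconds + 100;  // after compaction: the five TTL=100 cells dominate,
                                               // since the tombstone's deletion time is smaller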

Example 69 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

the class SSTableMetadataTest method testMaxMinComposites.

@Test
public void testMaxMinComposites() throws CharacterCodingException, ExecutionException, InterruptedException {
    /*
        creates two sstables, columns like this:
        ---------------------
        k   |a0:9|a1:8|..|a9:0
        ---------------------
        and
        ---------------------
        k2  |b0:9|b1:8|..|b9:0
        ---------------------
        meaning max columns are b9 and 9, min is a0 and 0
         */
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("StandardComposite2");
    // First sstable: partition "k" with clustering rows (a0,9) .. (a9,0).
    for (int i = 0; i < 10; i++) {
        new RowUpdateBuilder(cfs.metadata(), 0, "k")
            .clustering("a" + (9 - i), getBytes(i))
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    }
    cfs.forceBlockingFlush();
    // Second sstable: partition "k2" with clustering rows (b0,9) .. (b9,0).
    for (int i = 0; i < 10; i++) {
        new RowUpdateBuilder(cfs.metadata(), 0, "k2")
            .clustering("b" + (9 - i), getBytes(i))
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    }
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    assertEquals(cfs.getLiveSSTables().size(), 1);
    for (SSTableReader sstable : cfs.getLiveSSTables()) {
        assertEquals("b9", ByteBufferUtil.string(sstable.getSSTableMetadata().maxClusteringValues.get(0)));
        assertEquals(9, ByteBufferUtil.toInt(sstable.getSSTableMetadata().maxClusteringValues.get(1)));
        assertEquals("a0", ByteBufferUtil.string(sstable.getSSTableMetadata().minClusteringValues.get(0)));
        assertEquals(0, ByteBufferUtil.toInt(sstable.getSSTableMetadata().minClusteringValues.get(1)));
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowUpdateBuilder(org.apache.cassandra.db.RowUpdateBuilder) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Test(org.junit.Test)
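
The assertions read the per-component clustering bounds out of the sstable metadata: component 0 spans "a0".."b9" and component 1 spans the integers 0..9, matching the diagram in the comment. The getBytes(i) helper is defined elsewhere in the test class; a plausible sketch of its shape, assumed to be a thin wrapper around ByteBufferUtil:

// Assumed helper; the actual implementation lives elsewhere in SSTableMetadataTest.
private static ByteBuffer getBytes(int i) {
    return ByteBufferUtil.bytes(i);
}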

Example 70 with SSTableReader

use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.

the class SSTableReaderTest method testGetPositionsForRanges.

@Test
public void testGetPositionsForRanges() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
    partitioner = store.getPartitioner();
    // insert data and compact to a single sstable
    CompactionManager.instance.disableAutoCompaction();
    for (int j = 0; j < 10; j++) {
        new RowUpdateBuilder(store.metadata(), j, String.valueOf(j))
            .clustering("0")
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
    }
    store.forceBlockingFlush();
    CompactionManager.instance.performMaximal(store, false);
    List<Range<Token>> ranges = new ArrayList<>();
    // 1 key
    ranges.add(new Range<>(t(0), t(1)));
    // 2 keys
    ranges.add(new Range<>(t(2), t(4)));
    // wrapping range from key to end
    ranges.add(new Range<>(t(6), partitioner.getMinimumToken()));
    // empty range (should be ignored)
    ranges.add(new Range<>(t(9), t(91)));
    // confirm that positions increase continuously
    SSTableReader sstable = store.getLiveSSTables().iterator().next();
    long previous = -1;
    for (Pair<Long, Long> section : sstable.getPositionsForRanges(ranges)) {
        assert previous <= section.left : previous + " ! < " + section.left;
        assert section.left < section.right : section.left + " ! < " + section.right;
        previous = section.right;
    }
}
Also used : LocalToken(org.apache.cassandra.dht.LocalPartitioner.LocalToken) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Test(org.junit.Test)
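
The t(...) helper used when building the ranges is defined elsewhere in SSTableReaderTest. A plausible sketch of its shape, assuming it simply maps an integer key to that key's token under the store's partitioner (the field assigned at the top of the test):

// Assumed shape of the helper; the real one lives elsewhere in SSTableReaderTest.
private Token t(int key) {
    return partitioner.getToken(ByteBufferUtil.bytes(String.valueOf(key)));
}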

Aggregations

SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 289 usages
Test (org.junit.Test): 159 usages
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 91 usages
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 55 usages
Keyspace (org.apache.cassandra.db.Keyspace): 49 usages
File (java.io.File): 45 usages
UUID (java.util.UUID): 28 usages
Range (org.apache.cassandra.dht.Range): 28 usages
Directories (org.apache.cassandra.db.Directories): 27 usages
Token (org.apache.cassandra.dht.Token): 24 usages
RandomAccessFile (java.io.RandomAccessFile): 22 usages
AbstractTransactionalTest (org.apache.cassandra.utils.concurrent.AbstractTransactionalTest): 20 usages
ArrayList (java.util.ArrayList): 18 usages
ByteBuffer (java.nio.ByteBuffer): 17 usages
HashSet (java.util.HashSet): 16 usages
SchemaLoader.createKeyspace (org.apache.cassandra.SchemaLoader.createKeyspace): 16 usages
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 16 usages
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 16 usages
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 14 usages
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 13 usages