
Example 16 with Descriptor

Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

From the class StreamSessionTest, method mutateRepaired:

private static void mutateRepaired(SSTableReader sstable, long repairedAt, UUID pendingRepair) throws IOException {
    Descriptor descriptor = sstable.descriptor;
    // Rewrite the repair status (repairedAt / pendingRepair) in the sstable's stats metadata on disk
    descriptor.getMetadataSerializer().mutateRepaired(descriptor, repairedAt, pendingRepair);
    // Reload so the in-memory reader picks up the mutated metadata
    sstable.reloadSSTableMetadata();
}
Also used: Descriptor (org.apache.cassandra.io.sstable.Descriptor), DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)
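
A hedged sketch of how a test might call this helper, assuming an open ColumnFamilyStore cfs with at least one live sstable (the variable names are illustrative, not from the original test):

// Pick any live sstable and mark it repaired as of now, with no
// pending incremental-repair session (hence the null pendingRepair).
SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
mutateRepaired(sstable, System.currentTimeMillis(), null);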

Example 17 with Descriptor

Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

From the class CompactionStress, method initCf:

ColumnFamilyStore initCf(StressProfile stressProfile, boolean loadSSTables) {
    generateTokens(stressProfile.seedStr, StorageService.instance.getTokenMetadata(), numTokens);
    CreateTableStatement.RawStatement createStatement = stressProfile.getCreateStatement();
    List<File> dataDirectories = getDataDirectories();
    ColumnFamilyStore cfs = StressCQLSSTableWriter.Builder.createOfflineTable(createStatement, Collections.emptyList(), dataDirectories);
    if (loadSSTables) {
        Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.IGNORE).skipTemporary(true);
        List<SSTableReader> sstables = new ArrayList<>();
        // Open each listed sstable offline, skipping validation
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            Set<Component> components = entry.getValue();
            if (!components.contains(Component.DATA))
                continue;
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(entry.getKey(), components, cfs);
                sstables.add(sstable);
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error Loading %s: %s", entry.getKey(), e.getMessage()));
            }
        }
        cfs.disableAutoCompaction();
        // Register the opened readers with the cfs
        cfs.addSSTables(sstables);
    }
    return cfs;
}
Also used: CreateTableStatement (org.apache.cassandra.cql3.statements.CreateTableStatement), Directories (org.apache.cassandra.db.Directories), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Descriptor (org.apache.cassandra.io.sstable.Descriptor), DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor), Component (org.apache.cassandra.io.sstable.Component), File (java.io.File)
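
The Descriptor-keyed directory listing is the reusable pattern in this method; a minimal sketch of it in isolation, assuming an open ColumnFamilyStore cfs (the output statement is illustrative):

// List on-disk sstables as Descriptor -> component-set entries, skipping
// temporary files, and keep only complete ones (those with a DATA component).
Directories.SSTableLister lister = cfs.getDirectories()
                                      .sstableLister(Directories.OnTxnErr.IGNORE)
                                      .skipTemporary(true);
for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
    if (entry.getValue().contains(Component.DATA))
        System.out.println("complete sstable: " + entry.getKey());
}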

Example 18 with Descriptor

Use of org.apache.cassandra.io.sstable.Descriptor in project eiger by wlloyd.

From the class DefsTest, method testDropIndex:

@Test
public void testDropIndex() throws IOException, ExecutionException, InterruptedException, ConfigurationException {
    // insert some data.  save the sstable descriptor so we can make sure it's marked for delete after the drop
    RowMutation rm = new RowMutation("Keyspace6", ByteBufferUtil.bytes("k1"));
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("notbirthdate")), ByteBufferUtil.bytes(1L), 0);
    rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 0);
    rm.apply();
    ColumnFamilyStore cfs = Table.open("Keyspace6").getColumnFamilyStore("Indexed1");
    cfs.forceBlockingFlush();
    ColumnFamilyStore indexedCfs = cfs.indexManager.getIndexForColumn(cfs.indexManager.getIndexedColumns().iterator().next()).getIndexCfs();
    Descriptor desc = indexedCfs.getSSTables().iterator().next().descriptor;
    // drop the index
    // abusing rename to clone
    CFMetaData meta = CFMetaData.rename(cfs.metadata, cfs.metadata.cfName);
    ColumnDefinition cdOld = meta.getColumn_metadata().values().iterator().next();
    ColumnDefinition cdNew = new ColumnDefinition(cdOld.name, cdOld.getValidator(), null, null, null);
    meta.columnMetadata(Collections.singletonMap(cdOld.name, cdNew));
    UpdateColumnFamily update = new UpdateColumnFamily(meta.toAvro());
    update.apply();
    // check
    assert cfs.indexManager.getIndexedColumns().isEmpty();
    SSTableDeletingTask.waitForDeletions();
    assert !new File(desc.filenameFor(Component.DATA)).exists();
}
Also used: QueryPath (org.apache.cassandra.db.filter.QueryPath), UpdateColumnFamily (org.apache.cassandra.db.migration.UpdateColumnFamily), Descriptor (org.apache.cassandra.io.sstable.Descriptor), File (java.io.File), Test (org.junit.Test)
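
The closing assertion relies on the general Descriptor-to-path mapping; a sketch of just that piece, assuming desc was captured from the index's sstable before the drop, as above:

// filenameFor resolves a logical component to a concrete file path, so
// "was the sstable deleted?" reduces to a plain file-existence check.
File dataFile = new File(desc.filenameFor(Component.DATA));
assert !dataFile.exists() : "data component should be gone after the index drop";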

Example 19 with Descriptor

Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

From the class PerSSTableIndexWriterTest, method testPartialIndexWrites:

@Test
public void testPartialIndexWrites() throws Exception {
    final int maxKeys = 100000, numParts = 4, partSize = maxKeys / numParts;
    final String keyFormat = "key%06d";
    final long timestamp = System.currentTimeMillis();
    ColumnFamilyStore cfs = Keyspace.open(KS_NAME).getColumnFamilyStore(CF_NAME);
    ColumnMetadata column = cfs.metadata().getColumn(UTF8Type.instance.decompose("age"));
    SASIIndex sasi = (SASIIndex) cfs.indexManager.getIndexByName(cfs.name + "_age");
    File directory = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor descriptor = cfs.newSSTableDescriptor(directory);
    PerSSTableIndexWriter indexWriter = (PerSSTableIndexWriter) sasi.getFlushObserver(descriptor, OperationType.FLUSH);
    SortedMap<DecoratedKey, Row> expectedKeys = new TreeMap<>(DecoratedKey.comparator);
    for (int i = 0; i < maxKeys; i++) {
        ByteBuffer key = ByteBufferUtil.bytes(String.format(keyFormat, i));
        expectedKeys.put(cfs.metadata().partitioner.decorateKey(key), BTreeRow.singleCellRow(Clustering.EMPTY, BufferCell.live(column, timestamp, Int32Type.instance.decompose(i))));
    }
    indexWriter.begin();
    Iterator<Map.Entry<DecoratedKey, Row>> keyIterator = expectedKeys.entrySet().iterator();
    long position = 0;
    Set<String> segments = new HashSet<>();
    // Feed the keys in fixed-size parts, flushing an index segment after each part
    outer: for (; ; ) {
        for (int i = 0; i < partSize; i++) {
            if (!keyIterator.hasNext())
                break outer;
            Map.Entry<DecoratedKey, Row> key = keyIterator.next();
            indexWriter.startPartition(key.getKey(), position++);
            indexWriter.nextUnfilteredCluster(key.getValue());
        }
        PerSSTableIndexWriter.Index index = indexWriter.getIndex(column);
        OnDiskIndex segment = index.scheduleSegmentFlush(false).call();
        index.segments.add(Futures.immediateFuture(segment));
        segments.add(segment.getIndexPath());
    }
    for (String segment : segments)
        Assert.assertTrue(new File(segment).exists());
    String indexFile = indexWriter.indexes.get(column).filename(true);
    // final flush
    indexWriter.complete();
    for (String segment : segments)
        Assert.assertFalse(new File(segment).exists());
    OnDiskIndex index = new OnDiskIndex(new File(indexFile), Int32Type.instance, keyPosition -> {
        ByteBuffer key = ByteBufferUtil.bytes(String.format(keyFormat, keyPosition));
        return cfs.metadata().partitioner.decorateKey(key);
    });
    Assert.assertEquals(0, UTF8Type.instance.compare(index.minKey(), ByteBufferUtil.bytes(String.format(keyFormat, 0))));
    Assert.assertEquals(0, UTF8Type.instance.compare(index.maxKey(), ByteBufferUtil.bytes(String.format(keyFormat, maxKeys - 1))));
    Set<DecoratedKey> actualKeys = new HashSet<>();
    int count = 0;
    for (OnDiskIndex.DataTerm term : index) {
        RangeIterator<Long, Token> tokens = term.getTokens();
        while (tokens.hasNext()) {
            for (DecoratedKey key : tokens.next())
                actualKeys.add(key);
        }
        Assert.assertEquals(count++, (int) Int32Type.instance.compose(term.getTerm()));
    }
    Assert.assertEquals(expectedKeys.size(), actualKeys.size());
    for (DecoratedKey key : expectedKeys.keySet())
        Assert.assertTrue(actualKeys.contains(key));
    FileUtils.closeQuietly(index);
}
Also used: ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), SASIIndex (org.apache.cassandra.index.sasi.SASIIndex), DecoratedKey (org.apache.cassandra.db.DecoratedKey), ByteBuffer (java.nio.ByteBuffer), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Descriptor (org.apache.cassandra.io.sstable.Descriptor), BTreeRow (org.apache.cassandra.db.rows.BTreeRow), Row (org.apache.cassandra.db.rows.Row), File (java.io.File), Test (org.junit.Test)
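
The descriptor setup at the top of the test is the piece most likely to be reused; a minimal sketch, assuming an open ColumnFamilyStore cfs:

// Ask the table for a fresh Descriptor in one of its data directories; it
// names the files the next sstable generation (and any per-sstable index,
// such as SASI's) will be written under.
File directory = cfs.getDirectories().getDirectoryForNewSSTables();
Descriptor descriptor = cfs.newSSTableDescriptor(directory);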

Example 20 with Descriptor

Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

From the class CompressedInputStreamTest, method testCompressedReadWith:

/**
     * @param valuesToCheck    longs in the range [0, 999] whose positions are read back and verified
     * @param testTruncate     if true, truncate the buffered data to 50 bytes to force a short read
     * @param testException    if true, exercise the exception path instead of reading values
     * @param minCompressRatio minimum compression ratio passed to the Snappy compression params
     */
private void testCompressedReadWith(long[] valuesToCheck, boolean testTruncate, boolean testException, double minCompressRatio) throws Exception {
    assert valuesToCheck != null && valuesToCheck.length > 0;
    // write compressed data file of longs
    File parentDir = new File(System.getProperty("java.io.tmpdir"));
    Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1);
    File tmp = new File(desc.filenameFor(Component.DATA));
    MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    CompressionParams param = CompressionParams.snappy(32, minCompressRatio);
    Map<Long, Long> index = new HashMap<>();
    try (CompressedSequentialWriter writer = new CompressedSequentialWriter(tmp, desc.filenameFor(Component.COMPRESSION_INFO), null, SequentialWriterOption.DEFAULT, param, collector)) {
        for (long l = 0L; l < 1000; l++) {
            index.put(l, writer.position());
            writer.writeLong(l);
        }
        writer.finish();
    }
    CompressionMetadata comp = CompressionMetadata.create(tmp.getAbsolutePath());
    List<Pair<Long, Long>> sections = new ArrayList<>();
    for (long l : valuesToCheck) {
        long position = index.get(l);
        sections.add(Pair.create(position, position + 8));
    }
    CompressionMetadata.Chunk[] chunks = comp.getChunksForSections(sections);
    long totalSize = comp.getTotalSizeForSections(sections);
    long expectedSize = 0;
    for (CompressionMetadata.Chunk c : chunks)
        expectedSize += c.length + 4; // compressed chunk plus its 4-byte checksum
    assertEquals(expectedSize, totalSize);
    // buffer up only relevant parts of file
    int size = 0;
    for (CompressionMetadata.Chunk c : chunks)
        size += (c.length + 4); // compressed chunk plus its 4-byte CRC
    byte[] toRead = new byte[size];
    try (RandomAccessFile f = new RandomAccessFile(tmp, "r")) {
        int pos = 0;
        for (CompressionMetadata.Chunk c : chunks) {
            f.seek(c.offset);
            pos += f.read(toRead, pos, c.length + 4);
        }
    }
    if (testTruncate) {
        byte[] actuallyRead = new byte[50];
        System.arraycopy(toRead, 0, actuallyRead, 0, 50);
        toRead = actuallyRead;
    }
    // read buffer using CompressedInputStream
    CompressionInfo info = new CompressionInfo(chunks, param);
    if (testException) {
        testException(sections, info);
        return;
    }
    CompressedInputStream input = new CompressedInputStream(new ByteArrayInputStream(toRead), info, ChecksumType.CRC32, () -> 1.0);
    try (DataInputStream in = new DataInputStream(input)) {
        for (int i = 0; i < sections.size(); i++) {
            input.position(sections.get(i).left);
            long readValue = in.readLong();
            assertEquals("expected " + valuesToCheck[i] + " but was " + readValue, valuesToCheck[i], readValue);
        }
    }
}
Also used: CompressedSequentialWriter (org.apache.cassandra.io.compress.CompressedSequentialWriter), CompressionMetadata (org.apache.cassandra.io.compress.CompressionMetadata), ClusteringComparator (org.apache.cassandra.db.ClusteringComparator), CompressionInfo (org.apache.cassandra.streaming.compress.CompressionInfo), CompressionParams (org.apache.cassandra.schema.CompressionParams), CompressedInputStream (org.apache.cassandra.streaming.compress.CompressedInputStream), Descriptor (org.apache.cassandra.io.sstable.Descriptor), DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor), MetadataCollector (org.apache.cassandra.io.sstable.metadata.MetadataCollector), Pair (org.apache.cassandra.utils.Pair)
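
As this test shows, a Descriptor can also be constructed by hand for scratch files; a condensed sketch of that idiom (the keyspace/table names and generation are illustrative):

// Build a Descriptor directly and derive per-component file names from it.
File parentDir = new File(System.getProperty("java.io.tmpdir"));
Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1);
String dataPath = desc.filenameFor(Component.DATA);
String compressionPath = desc.filenameFor(Component.COMPRESSION_INFO);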

Aggregations

Descriptor (org.apache.cassandra.io.sstable.Descriptor): 38
File (java.io.File): 24
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 15
Test (org.junit.Test): 12
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 9
Component (org.apache.cassandra.io.sstable.Component): 8
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 8
TableMetadata (org.apache.cassandra.schema.TableMetadata): 6
Pair (org.apache.cassandra.utils.Pair): 6
IOException (java.io.IOException): 5
Map (java.util.Map): 4
Directories (org.apache.cassandra.db.Directories): 4
Keyspace (org.apache.cassandra.db.Keyspace): 4
PrintStream (java.io.PrintStream): 3
ByteBuffer (java.nio.ByteBuffer): 3
Set (java.util.Set): 3
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 3
SerializationHeader (org.apache.cassandra.db.SerializationHeader): 3
NonBlockingHashMap (org.cliffc.high_scale_lib.NonBlockingHashMap): 3
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2