Search in sources:

Example 1 with TableMetadataRef

Use of org.apache.cassandra.schema.TableMetadataRef in project cassandra by apache.

The class MockSchema, method newCFS.

public static ColumnFamilyStore newCFS(String ksname) {
    String cfname = "mockcf" + (id.incrementAndGet());
    TableMetadata metadata = newTableMetadata(ksname, cfname);
    // 'ks' and 'id' are static fields of MockSchema: a mock Keyspace and a counter used to
    // generate unique table names; the freshly built TableMetadata is wrapped in a ref for the CFS.
    return new ColumnFamilyStore(ks, cfname, 0, new TableMetadataRef(metadata), new Directories(metadata), false, false, false);
}
Also used: TableMetadata (org.apache.cassandra.schema.TableMetadata), TableMetadataRef (org.apache.cassandra.schema.TableMetadataRef)
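
For orientation, a minimal sketch of what the ref itself provides, not taken from the Cassandra sources: it reuses the newTableMetadata helper from the snippet above (assumed public static on MockSchema) and the public forOfflineTools factory that Example 2 below uses; the keyspace and name fields are read the same way in Example 3, and ref.get() returning the currently held TableMetadata is an assumption about the accessor.

public static TableMetadataRef refForDemo() {
    // Build metadata for a hypothetical test table (names here are made up for the sketch).
    TableMetadata metadata = MockSchema.newTableMetadata("mockks", "mockcf_demo");
    // Wrap it in a ref via the public factory also used in Example 2.
    TableMetadataRef ref = TableMetadataRef.forOfflineTools(metadata);
    // The ref exposes the table's identity directly, as Example 3 relies on...
    assert ref.keyspace.equals(metadata.keyspace) && ref.name.equals(metadata.name);
    // ...and get() dereferences to the TableMetadata currently held (assumption).
    assert ref.get() == metadata;
    return ref;
}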

Example 2 with TableMetadataRef

Use of org.apache.cassandra.schema.TableMetadataRef in project cassandra by apache.

The class CassandraIndex, method setMetadata.

private void setMetadata(IndexMetadata indexDef) {
    metadata = indexDef;
    // Work out which column the index targets, and how, from the base table's metadata.
    Pair<ColumnMetadata, IndexTarget.Type> target = TargetParser.parse(baseCfs.metadata(), indexDef);
    functions = getFunctions(indexDef, target);
    // Build metadata for the index's backing table and wrap it via forOfflineTools,
    // since that table is managed locally rather than looked up through the schema.
    TableMetadataRef tableRef = TableMetadataRef.forOfflineTools(indexCfsMetadata(baseCfs.metadata(), indexDef));
    indexCfs = ColumnFamilyStore.createColumnFamilyStore(baseCfs.keyspace, tableRef.name, tableRef, baseCfs.getTracker().loadsstables);
    indexedColumn = target.left;
}
Also used: ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), AbstractType (org.apache.cassandra.db.marshal.AbstractType), CollectionType (org.apache.cassandra.db.marshal.CollectionType), TableMetadataRef (org.apache.cassandra.schema.TableMetadataRef)
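
A short aside on the two ways these examples obtain a ref, as a sketch under stated assumptions: indexCfsMetadata is assumed to be a public static helper on CassandraIndex, and baseCfs.metadata().id is assumed to expose the table's id, the same id type Example 3 reads from key.tableId.

private static TableMetadataRef refForIndexBackingTable(ColumnFamilyStore baseCfs, IndexMetadata indexDef) {
    // A table registered in the schema is looked up by id, as Example 3 does with key.tableId.
    TableMetadataRef baseRef = Schema.instance.getTableMetadataRef(baseCfs.metadata().id);
    assert baseRef != null;
    // The index's backing table is built locally and is presumably never registered,
    // so its metadata is wrapped directly with forOfflineTools instead.
    return TableMetadataRef.forOfflineTools(CassandraIndex.indexCfsMetadata(baseCfs.metadata(), indexDef));
}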

Example 3 with TableMetadataRef

Use of org.apache.cassandra.schema.TableMetadataRef in project cassandra by apache.

The class KeyCacheCqlTest, method test2iKeyCachePathsSaveKeysForDroppedTable.

private void test2iKeyCachePathsSaveKeysForDroppedTable() throws Throwable {
    String table = createTable("CREATE TABLE %s (" + commonColumnsDef + "PRIMARY KEY ((part_key_a, part_key_b),clust_key_a,clust_key_b,clust_key_c))");
    String indexName = createIndex("CREATE INDEX ON %s (col_int)");
    insertData(table, indexName, true);
    clearCache();
    CacheMetrics metrics = CacheService.instance.keyCache.getMetrics();
    long expectedNumberOfRequests = 0;
    for (int i = 0; i < 10; i++) {
        UntypedResultSet result = execute("SELECT part_key_a FROM %s WHERE col_int = ?", i);
        assertEquals(500, result.size());
        // Index requests and table requests are both added to the same metric
        // We expect 10 requests on the index SSTables and 10 IN requests on the table SSTables + BF false positives
        expectedNumberOfRequests += recentBloomFilterFalsePositives() + 20;
    }
    long hits = metrics.hits.getCount();
    long requests = metrics.requests.getCount();
    assertEquals(0, hits);
    assertEquals(expectedNumberOfRequests, requests);
    for (int i = 0; i < 10; i++) {
        UntypedResultSet result = execute("SELECT part_key_a FROM %s WHERE col_int = ?", i);
        // 100 part-keys * 50 clust-keys
        // indexed on part-key % 10 = 10 index partitions
        // (50 clust-keys  *  100-part-keys  /  10 possible index-values) = 500
        assertEquals(500, result.size());
        // Index requests and table requests are both added to the same metric
        // We expect 10 requests on the index SSTables and 10 IN requests on the table SSTables + BF false positives
        expectedNumberOfRequests += recentBloomFilterFalsePositives() + 20;
    }
    metrics = CacheService.instance.keyCache.getMetrics();
    hits = metrics.hits.getCount();
    requests = metrics.requests.getCount();
    assertEquals(200, hits);
    assertEquals(expectedNumberOfRequests, requests);
    dropTable("DROP TABLE %s");
    CacheService.instance.keyCache.submitWrite(Integer.MAX_VALUE).get();
    CacheService.instance.keyCache.clear();
    Assert.assertEquals(0, CacheService.instance.keyCache.size());
    // then load saved
    CacheService.instance.keyCache.loadSaved();
    Iterator<KeyCacheKey> iter = CacheService.instance.keyCache.keyIterator();
    while (iter.hasNext()) {
        KeyCacheKey key = iter.next();
        TableMetadataRef tableMetadataRef = Schema.instance.getTableMetadataRef(key.tableId);
        Assert.assertFalse(tableMetadataRef.keyspace.equals("KEYSPACE_PER_TEST"));
        Assert.assertFalse(tableMetadataRef.name.startsWith(table));
    }
}
Also used: CacheMetrics (org.apache.cassandra.metrics.CacheMetrics), TableMetadataRef (org.apache.cassandra.schema.TableMetadataRef), KeyCacheKey (org.apache.cassandra.cache.KeyCacheKey)
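
One detail worth calling out from the loop above, as an assumption rather than something the test shows: Schema.instance.getTableMetadataRef(TableId) presumably returns null for ids that are no longer in the schema, so a defensive variant of the same lookup could look like this (the method name is hypothetical):

private static void printLiveKeyCacheTables(Iterator<KeyCacheKey> keys) {
    while (keys.hasNext()) {
        KeyCacheKey key = keys.next();
        // Resolve the ref from the cache key's table id; guard against tables dropped
        // since the key was cached (assumption: unknown ids resolve to null).
        TableMetadataRef ref = Schema.instance.getTableMetadataRef(key.tableId);
        if (ref != null)
            System.out.println(ref.keyspace + '.' + ref.name);
    }
}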

Example 4 with TableMetadataRef

Use of org.apache.cassandra.schema.TableMetadataRef in project cassandra by apache.

The class RealTransactionsTest, method replaceSSTable.

private SSTableReader replaceSSTable(ColumnFamilyStore cfs, LifecycleTransaction txn, boolean fail) {
    List<SSTableReader> newsstables = null;
    int nowInSec = FBUtilities.nowInSeconds();
    try (CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(FBUtilities.nowInSeconds()))) {
        try (SSTableRewriter rewriter = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
            AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
            CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId())) {
            long lastCheckObsoletion = nanoTime();
            File directory = txn.originals().iterator().next().descriptor.directory;
            Descriptor desc = cfs.newSSTableDescriptor(directory);
            // Resolve the live ref for this table from the descriptor; the new writer is created against it.
            TableMetadataRef metadata = Schema.instance.getTableMetadataRef(desc);
            rewriter.switchWriter(SSTableWriter.create(metadata, desc, 0, 0, null, false, 0, SerializationHeader.make(cfs.metadata(), txn.originals()), cfs.indexManager.listIndexes(), txn));
            while (ci.hasNext()) {
                rewriter.append(ci.next());
                if (nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                    controller.maybeRefreshOverlaps();
                    lastCheckObsoletion = nanoTime();
                }
            }
            if (!fail)
                newsstables = rewriter.finish();
            else
                rewriter.abort();
        }
    }
    assertTrue(fail || newsstables != null);
    if (newsstables != null) {
        Assert.assertEquals(1, newsstables.size());
        return newsstables.iterator().next();
    }
    return null;
}
Also used: CompactionController (org.apache.cassandra.db.compaction.CompactionController), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy), CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator), TableMetadataRef (org.apache.cassandra.schema.TableMetadataRef), Descriptor (org.apache.cassandra.io.sstable.Descriptor), SSTableRewriter (org.apache.cassandra.io.sstable.SSTableRewriter), File (org.apache.cassandra.io.util.File)

Example 5 with TableMetadataRef

Use of org.apache.cassandra.schema.TableMetadataRef in project cassandra by apache.

The class BigTableZeroCopyWriterTest, method writeDataTestCycle.

private void writeDataTestCycle(Function<ByteBuffer, DataInputPlus> bufferMapper) {
    File dir = store.getDirectories().getDirectoryForNewSSTables();
    Descriptor desc = store.newSSTableDescriptor(dir);
    // Resolve the table's ref from the sstable descriptor; the zero-copy writer takes the ref rather than a metadata snapshot.
    TableMetadataRef metadata = Schema.instance.getTableMetadataRef(desc);
    LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM);
    Set<Component> componentsToWrite = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.STATS);
    BigTableZeroCopyWriter btzcw = new BigTableZeroCopyWriter(desc, metadata, txn, componentsToWrite);
    for (Component component : componentsToWrite) {
        if (Files.exists(Paths.get(desc.filenameFor(component)))) {
            Pair<DataInputPlus, Long> pair = getSSTableComponentData(sstable, component, bufferMapper);
            try {
                btzcw.writeComponent(component.type, pair.left, pair.right);
            } catch (ClosedChannelException e) {
                throw new UncheckedIOException(e);
            }
        }
    }
    Collection<SSTableReader> readers = btzcw.finish(true);
    SSTableReader reader = readers.toArray(new SSTableReader[0])[0];
    assertNotEquals(sstable.getFilename(), reader.getFilename());
    assertEquals(sstable.estimatedKeys(), reader.estimatedKeys());
    assertEquals(sstable.isPendingRepair(), reader.isPendingRepair());
    assertRowCount(expectedRowCount);
}
Also used: ClosedChannelException (java.nio.channels.ClosedChannelException), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), UncheckedIOException (java.io.UncheckedIOException), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), TableMetadataRef (org.apache.cassandra.schema.TableMetadataRef), Descriptor (org.apache.cassandra.io.sstable.Descriptor), DataInputPlus (org.apache.cassandra.io.util.DataInputPlus), Component (org.apache.cassandra.io.sstable.Component), File (org.apache.cassandra.io.util.File)
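
Examples 4 and 5 resolve the ref the same way, from an SSTable Descriptor rather than a TableId, before handing it to a writer. A condensed sketch of that lookup (the ColumnFamilyStore is assumed to come from the test fixture, and get() observing the current schema is the same assumption as in the first sketch):

private static TableMetadataRef refForNewSSTable(ColumnFamilyStore cfs) {
    // Pick a data directory and build a descriptor for a new sstable, as both tests do.
    File dir = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor desc = cfs.newSSTableDescriptor(dir);
    // Resolve the live ref for the table the descriptor belongs to; writers are handed the
    // ref itself, so later calls to get() observe the current metadata.
    return Schema.instance.getTableMetadataRef(desc);
}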

Aggregations

TableMetadataRef (org.apache.cassandra.schema.TableMetadataRef): 10
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 4
File (org.apache.cassandra.io.util.File): 4
Range (org.apache.cassandra.dht.Range): 3
Token (org.apache.cassandra.dht.Token): 3
File (java.io.File): 2
UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet): 2
CQLSSTableWriter (org.apache.cassandra.io.sstable.CQLSSTableWriter): 2
Descriptor (org.apache.cassandra.io.sstable.Descriptor): 2
SSTableLoader (org.apache.cassandra.io.sstable.SSTableLoader): 2
OutputHandler (org.apache.cassandra.utils.OutputHandler): 2
IOException (java.io.IOException): 1
UncheckedIOException (java.io.UncheckedIOException): 1
InetAddress (java.net.InetAddress): 1
ClosedChannelException (java.nio.channels.ClosedChannelException): 1
KeyCacheKey (org.apache.cassandra.cache.KeyCacheKey): 1
AbstractCompactionStrategy (org.apache.cassandra.db.compaction.AbstractCompactionStrategy): 1
CompactionController (org.apache.cassandra.db.compaction.CompactionController): 1
CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator): 1
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 1