Example 91 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From the class ViewFilteringTest, method complexRestrictedTimestampUpdateTest.

public void complexRestrictedTimestampUpdateTest(boolean flush) throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY (a, b))");
    execute("USE " + keyspace());
    executeNet(protocolVersion, "USE " + keyspace());
    Keyspace ks = Keyspace.open(keyspace());
    createView("mv", "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %%s WHERE a IS NOT NULL AND b IS NOT NULL AND c IS NOT NULL AND c = 1 PRIMARY KEY (c, a, b)");
    ks.getColumnFamilyStore("mv").disableAutoCompaction();
    //Set initial values TS=0, matching the restriction and verify view
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, d) VALUES (0, 0, 1, 0) USING TIMESTAMP 0");
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    //update c's timestamp TS=2
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    //change c's value and TS=3, tombstones c=1 and adds c=0 record
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? and b = ? ", 0, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 0, 0, 0));
    if (flush) {
        ks.getColumnFamilyStore("mv").forceMajorCompaction();
        FBUtilities.waitOnFutures(ks.flush());
    }
    //change c's value back to 1 with TS=4, check we can see d
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 4 SET c = ? WHERE a = ? and b = ? ", 1, 0, 0);
    if (flush) {
        ks.getColumnFamilyStore("mv").forceMajorCompaction();
        FBUtilities.waitOnFutures(ks.flush());
    }
    assertRows(execute("SELECT d, e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, null));
    //Add e value @ TS=1
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 1 SET e = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d, e from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(0, 1));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    //Change d value @ TS=2
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET d = ? WHERE a = ? and b = ? ", 2, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(2));
    if (flush)
        FBUtilities.waitOnFutures(ks.flush());
    //Change d value @ TS=3
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? and b = ? ", 1, 0, 0);
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row(1));
    //Tombstone c
    executeNet(protocolVersion, "DELETE FROM %s WHERE a = ? and b = ?", 0, 0);
    assertRows(execute("SELECT d from mv"));
    //Add back without D
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c) VALUES (0, 0, 1)");
    //Make sure D doesn't pop back in.
    assertRows(execute("SELECT d from mv WHERE c = ? and a = ? and b = ?", 1, 0, 0), row((Object) null));
    //New partition
    // insert a row with timestamp 0
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?) USING TIMESTAMP 0", 1, 0, 1, 0, 0);
    // overwrite pk and e with timestamp 1, but don't overwrite d
    executeNet(protocolVersion, "INSERT INTO %s (a, b, c, e) VALUES (?, ?, ?, ?) USING TIMESTAMP 1", 1, 0, 1, 0);
    // delete with timestamp 0 (which should only delete d)
    executeNet(protocolVersion, "DELETE FROM %s USING TIMESTAMP 0 WHERE a = ? AND b = ?", 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0), row(1, 0, 1, null, 0));
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 2 SET c = ? WHERE a = ? AND b = ?", 1, 1, 1);
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET c = ? WHERE a = ? AND b = ?", 1, 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0), row(1, 0, 1, null, 0));
    executeNet(protocolVersion, "UPDATE %s USING TIMESTAMP 3 SET d = ? WHERE a = ? AND b = ?", 0, 1, 0);
    assertRows(execute("SELECT a, b, c, d, e from mv WHERE c = ? and a = ? and b = ?", 1, 1, 0), row(1, 0, 1, 0, 0));
}
Also used: SystemKeyspace (org.apache.cassandra.db.SystemKeyspace), Keyspace (org.apache.cassandra.db.Keyspace)
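A minimal sketch of the Keyspace plumbing this test relies on, pulled out of the test harness: opening a keyspace, grabbing the view's backing ColumnFamilyStore, and forcing flush and compaction. The class and parameter names are hypothetical placeholders, and an initialized embedded Cassandra test environment is assumed.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.utils.FBUtilities;

public final class ViewFlushSketch {

    // Hypothetical helper mirroring the pattern above; the keyspace and view names
    // must match whatever the surrounding test schema actually created.
    static void prepareAndFlush(String keyspaceName, String viewName) {
        // Open the keyspace instance registered on this node (assumes it already exists).
        Keyspace ks = Keyspace.open(keyspaceName);

        // A materialized view is backed by an ordinary ColumnFamilyStore; disabling
        // auto-compaction keeps the sstable layout deterministic between flushes.
        ColumnFamilyStore viewStore = ks.getColumnFamilyStore(viewName);
        viewStore.disableAutoCompaction();

        // Keyspace.flush() returns one future per store; wait for all of them.
        FBUtilities.waitOnFutures(ks.flush());

        // A major compaction then merges the flushed sstables, as the test does before
        // checking which timestamped cells survive in the view.
        viewStore.forceMajorCompaction();
    }
}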

Example 92 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From the class SnapshotDeletingTest, method testCompactionHook.

@Test
public void testCompactionHook() throws Exception {
    Assume.assumeTrue(FBUtilities.isWindows);
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();
    populate(10000);
    store.snapshot("snapshot1");
    // Confirm snapshot deletion fails. Sleep for a bit just to make sure the SnapshotDeletingTask has
    // time to run and fail.
    Thread.sleep(500);
    store.clearSnapshot("snapshot1");
    assertEquals(1, SnapshotDeletingTask.pendingDeletionCount());
    // Compact the cf and confirm that the executor's after hook calls rescheduleDeletion
    populate(20000);
    store.forceBlockingFlush();
    store.forceMajorCompaction();
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < 1000 && SnapshotDeletingTask.pendingDeletionCount() > 0) {
        Thread.yield();
    }
    assertEquals(0, SnapshotDeletingTask.pendingDeletionCount());
}
Also used: Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
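The snapshot lifecycle the test exercises, reduced to a sketch that uses only calls appearing in the test; the keyspace, table, and snapshot names are placeholders, and the SnapshotDeletingTask import path is an assumption about the Cassandra source tree rather than something stated above.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.io.sstable.SnapshotDeletingTask;

public final class SnapshotSketch {

    static void snapshotAndClear(String keyspaceName, String tableName) {
        ColumnFamilyStore store = Keyspace.open(keyspaceName).getColumnFamilyStore(tableName);

        // Take a named snapshot (hard links to the current sstables).
        store.snapshot("snapshot1");

        // Ask for the snapshot to be removed; on Windows this can fail while the files
        // are still in use, which is what leaves a pending SnapshotDeletingTask.
        store.clearSnapshot("snapshot1");

        // Flushing and compacting gives the compaction executor's after-hook a chance
        // to reschedule any deletions that could not complete immediately.
        store.forceBlockingFlush();
        store.forceMajorCompaction();

        // The pending count should drain back to zero once the deletions succeed.
        System.out.println("pending snapshot deletions: " + SnapshotDeletingTask.pendingDeletionCount());
    }
}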

Example 93 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From the class IndexSummaryManagerTest, method testCancelIndex.

@Test
public void testCancelIndex() throws Exception {
    String ksname = KEYSPACE1;
    // index interval of 8, no key caching
    String cfname = CF_STANDARDLOWiINTERVAL;
    Keyspace keyspace = Keyspace.open(ksname);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    final int numSSTables = 4;
    int numRows = 256;
    createSSTables(ksname, cfname, numSSTables, numRows);
    final List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
    for (SSTableReader sstable : sstables) sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
    final long singleSummaryOffHeapSpace = sstables.get(0).getIndexSummaryOffHeapSize();
    // everything should get cut in half
    final AtomicReference<CompactionInterruptedException> exception = new AtomicReference<>();
    // barrier to control when redistribution runs
    final CountDownLatch barrier = new CountDownLatch(1);
    Thread t = NamedThreadFactory.createThread(new Runnable() {

        public void run() {
            try {
                // Don't leave enough space for even the minimal index summaries
                try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
                    IndexSummaryManager.redistributeSummaries(new ObservableRedistribution(Collections.EMPTY_LIST, of(cfs.metadata.id, txn), singleSummaryOffHeapSpace, barrier));
                }
            } catch (CompactionInterruptedException ex) {
                exception.set(ex);
            } catch (IOException ignored) {
            }
        }
    });
    t.start();
    while (CompactionManager.instance.getActiveCompactions() == 0 && t.isAlive()) Thread.sleep(1);
    // to ensure that the stop condition check in IndexSummaryRedistribution::redistributeSummaries
    // is made *after* the halt request is made to the CompactionManager, don't allow the redistribution
    // to proceed until stopCompaction has been called.
    CompactionManager.instance.stopCompaction("INDEX_SUMMARY");
    // allows the redistribution to proceed
    barrier.countDown();
    t.join();
    assertNotNull("Expected compaction interrupted exception", exception.get());
    assertTrue("Expected no active compactions", CompactionMetrics.getCompactions().isEmpty());
    Set<SSTableReader> beforeRedistributionSSTables = new HashSet<>(sstables);
    Set<SSTableReader> afterCancelSSTables = new HashSet<>(cfs.getLiveSSTables());
    Set<SSTableReader> disjoint = Sets.symmetricDifference(beforeRedistributionSSTables, afterCancelSSTables);
    assertTrue(String.format("Mismatched files before and after cancelling redistribution: %s", Joiner.on(",").join(disjoint)), disjoint.isEmpty());
    validateData(cfs, numRows);
}
Also used: CompactionInterruptedException (org.apache.cassandra.db.compaction.CompactionInterruptedException), LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction), AtomicReference (java.util.concurrent.atomic.AtomicReference), IOException (java.io.IOException), RestorableMeter (org.apache.cassandra.metrics.RestorableMeter), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
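Two pieces of machinery the test coordinates are worth isolating: claiming a set of sstables in a LifecycleTransaction and asking the CompactionManager to halt index-summary work. The sketch below uses only calls visible in the test; the names are placeholders, the ObservableRedistribution test double is omitted, and the assumption that tryModify returns null when the sstables are already claimed is mine, not stated in the test.

import java.util.ArrayList;
import java.util.List;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;

public final class CancelRedistributionSketch {

    static void claimAndCancel(String keyspaceName, String tableName) {
        ColumnFamilyStore cfs = Keyspace.open(keyspaceName).getColumnFamilyStore(tableName);
        List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());

        // The transaction must be closed whether or not the work runs, hence try-with-resources.
        try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN)) {
            if (txn == null)
                return; // assumed: another operation already holds these sstables

            // In the test, redistributeSummaries(...) runs on a separate thread inside a
            // transaction like this one and is expected to observe the stop request below,
            // throwing CompactionInterruptedException.
            CompactionManager.instance.stopCompaction("INDEX_SUMMARY");
        }
    }
}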

Example 94 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From the class SSTableRewriterTest, method testSSTableSectionsForRanges.

@Test
public void testSSTableSectionsForRanges() throws IOException, InterruptedException, ExecutionException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    truncate(cfs);
    cfs.addSSTable(writeFile(cfs, 1000));
    Collection<SSTableReader> allSSTables = cfs.getLiveSSTables();
    assertEquals(1, allSSTables.size());
    final Token firstToken = allSSTables.iterator().next().first.getToken();
    DatabaseDescriptor.setSSTablePreempiveOpenIntervalInMB(1);
    List<StreamSession.SSTableStreamingSections> sectionsBeforeRewrite = StreamSession.getSSTableSectionsForRanges(Collections.singleton(new Range<Token>(firstToken, firstToken)), Collections.singleton(cfs), 0L, null);
    assertEquals(1, sectionsBeforeRewrite.size());
    for (StreamSession.SSTableStreamingSections section : sectionsBeforeRewrite) section.ref.release();
    final AtomicInteger checkCount = new AtomicInteger();
    // needed since we get notified when compaction is done as well - we can't get sections for ranges for obsoleted sstables
    final AtomicBoolean done = new AtomicBoolean(false);
    final AtomicBoolean failed = new AtomicBoolean(false);
    Runnable r = new Runnable() {

        public void run() {
            while (!done.get()) {
                Set<Range<Token>> range = Collections.singleton(new Range<Token>(firstToken, firstToken));
                List<StreamSession.SSTableStreamingSections> sections = StreamSession.getSSTableSectionsForRanges(range, Collections.singleton(cfs), 0L, null);
                if (sections.size() != 1)
                    failed.set(true);
                for (StreamSession.SSTableStreamingSections section : sections) section.ref.release();
                checkCount.incrementAndGet();
                Uninterruptibles.sleepUninterruptibly(5, TimeUnit.MILLISECONDS);
            }
        }
    };
    Thread t = NamedThreadFactory.createThread(r);
    try {
        t.start();
        cfs.forceMajorCompaction();
    // reset
    } finally {
        DatabaseDescriptor.setSSTablePreempiveOpenIntervalInMB(50);
        done.set(true);
        t.join(20);
    }
    assertFalse(failed.get());
    assertTrue(checkCount.get() >= 2);
    truncate(cfs);
}
Also used: Token (org.apache.cassandra.dht.Token), Range (org.apache.cassandra.dht.Range), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader), StreamSession (org.apache.cassandra.streaming.StreamSession), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Keyspace (org.apache.cassandra.db.Keyspace), ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), Test (org.junit.Test)
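The call at the center of the race the test probes, isolated as a sketch: asking StreamSession for streamable sections over a token range and releasing the references it returns. The argument list (ranges, stores, 0L, null) is copied from the test rather than derived from the API; names are placeholders and a populated table is assumed.

import java.util.Collections;
import java.util.List;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.streaming.StreamSession;

public final class StreamingSectionsSketch {

    static int countSections(String keyspaceName, String tableName) {
        ColumnFamilyStore cfs = Keyspace.open(keyspaceName).getColumnFamilyStore(tableName);

        // Build a degenerate range from the first key of the first live sstable,
        // exactly as the test does.
        Token first = cfs.getLiveSSTables().iterator().next().first.getToken();
        Range<Token> range = new Range<>(first, first);

        List<StreamSession.SSTableStreamingSections> sections =
            StreamSession.getSSTableSectionsForRanges(Collections.singleton(range),
                                                      Collections.singleton(cfs), 0L, null);

        // Each section holds a reference to its sstable; releasing it is required,
        // which is why both the test and its background Runnable call ref.release().
        for (StreamSession.SSTableStreamingSections section : sections)
            section.ref.release();

        return sections.size();
    }
}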

Example 95 with Keyspace

Use of org.apache.cassandra.db.Keyspace in the Apache Cassandra project.

From the class Schema, method reload.

private void reload(KeyspaceMetadata previous, KeyspaceMetadata updated) {
    Keyspace keyspace = getKeyspaceInstance(updated.name);
    if (keyspace != null)
        keyspace.setMetadata(updated);
    MapDifference<TableId, TableMetadata> tablesDiff = previous.tables.diff(updated.tables);
    MapDifference<TableId, ViewMetadata> viewsDiff = previous.views.diff(updated.views);
    MapDifference<String, TableMetadata> indexesDiff = previous.tables.indexesDiff(updated.tables);
    // clean up after removed entries
    tablesDiff.entriesOnlyOnLeft().values().forEach(table -> metadataRefs.remove(table.id));
    viewsDiff.entriesOnlyOnLeft().values().forEach(view -> metadataRefs.remove(view.metadata.id));
    indexesDiff.entriesOnlyOnLeft().values().forEach(indexTable -> indexMetadataRefs.remove(Pair.create(indexTable.keyspace, indexTable.indexName().get())));
    // load up new entries
    tablesDiff.entriesOnlyOnRight().values().forEach(table -> metadataRefs.put(table.id, new TableMetadataRef(table)));
    viewsDiff.entriesOnlyOnRight().values().forEach(view -> metadataRefs.put(view.metadata.id, new TableMetadataRef(view.metadata)));
    indexesDiff.entriesOnlyOnRight().values().forEach(indexTable -> indexMetadataRefs.put(Pair.create(indexTable.keyspace, indexTable.indexName().get()), new TableMetadataRef(indexTable)));
    // refresh refs to updated ones
    tablesDiff.entriesDiffering().values().forEach(diff -> metadataRefs.get(diff.rightValue().id).set(diff.rightValue()));
    viewsDiff.entriesDiffering().values().forEach(diff -> metadataRefs.get(diff.rightValue().metadata.id).set(diff.rightValue().metadata));
    indexesDiff.entriesDiffering().values().stream().map(MapDifference.ValueDifference::rightValue).forEach(indexTable -> indexMetadataRefs.get(Pair.create(indexTable.keyspace, indexTable.indexName().get())).set(indexTable));
}
Also used: MapDifference (com.google.common.collect.MapDifference), SystemKeyspace (org.apache.cassandra.db.SystemKeyspace), Keyspace (org.apache.cassandra.db.Keyspace)
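reload() is organized around Guava's MapDifference three-way split: entries only on the left are removals, entries only on the right are additions, and differing entries are updates. Below is a self-contained sketch with plain string maps, independent of the Cassandra schema types, showing the same pattern.

import java.util.Map;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;

public final class MapDifferenceSketch {

    public static void main(String[] args) {
        Map<String, String> previous = ImmutableMap.of("t1", "v1", "t2", "v2");
        Map<String, String> updated  = ImmutableMap.of("t2", "v2b", "t3", "v3");

        MapDifference<String, String> diff = Maps.difference(previous, updated);

        // Present before but not after: clean up (reload() drops their metadata refs).
        diff.entriesOnlyOnLeft().keySet().forEach(k -> System.out.println("removed: " + k));

        // Present after but not before: register new entries.
        diff.entriesOnlyOnRight().keySet().forEach(k -> System.out.println("added: " + k));

        // Present in both with different values: point existing refs at the right-hand value.
        diff.entriesDiffering().forEach((k, v) ->
            System.out.println("updated: " + k + " -> " + v.rightValue()));
    }
}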

Aggregations

Keyspace (org.apache.cassandra.db.Keyspace): 176
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 107
Test (org.junit.Test): 77
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader): 66
Token (org.apache.cassandra.dht.Token): 38
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 29
LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction): 27
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 23
Range (org.apache.cassandra.dht.Range): 22
ArrayList (java.util.ArrayList): 21
TableMetadata (org.apache.cassandra.schema.TableMetadata): 21
InetAddressAndPort (org.apache.cassandra.locator.InetAddressAndPort): 20
HashMap (java.util.HashMap): 19
List (java.util.List): 19
SystemKeyspace (org.apache.cassandra.db.SystemKeyspace): 18
Set (java.util.Set): 17
AbstractReplicationStrategy (org.apache.cassandra.locator.AbstractReplicationStrategy): 17
Map (java.util.Map): 16
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 16
Collectors (java.util.stream.Collectors): 15