Search in sources:

Example 31 with Keyspace

use of org.apache.cassandra.db.Keyspace in project cassandra by apache.

The class TTLTest, method testRecoverOverflowedExpirationWithScrub.

public void testRecoverOverflowedExpirationWithScrub(boolean simple, boolean clustering, boolean runScrub, boolean runSStableScrub, boolean reinsertOverflowedTTL) throws Throwable {
    // Reinsertion only makes sense when at least one of the scrub paths actually runs.
    if (reinsertOverflowedTTL)
        assert runScrub || runSStableScrub;
    createTable(simple, clustering);
    Keyspace ks = Keyspace.open(KEYSPACE);
    ColumnFamilyStore store = ks.getColumnFamilyStore(currentTable());
    // Start from a clean table, then load the pre-built sstables with overflowed expirations.
    assertEquals(0, store.getLiveSSTables().size());
    copySSTablesToTableDir(currentTable(), simple, clustering);
    store.loadNewSSTables();
    if (runScrub) {
        store.scrub(true, false, true, reinsertOverflowedTTL, 1);
        if (!reinsertOverflowedTTL) {
            // Without reinsertion the overflowed rows are dropped entirely.
            assertEmpty(execute("SELECT * from %s"));
        } else {
            // Recovered rows must be visible both right after the scrub and after a major compaction.
            for (int pass = 0; pass < 2; pass++) {
                if (simple)
                    assertRows(execute("SELECT * from %s"), row(1, 1, 1), row(2, 2, null));
                else
                    assertRows(execute("SELECT * from %s"), row(1, 1, set("v11", "v12", "v13", "v14")), row(2, 2, set("v21", "v22", "v23", "v24")));
                if (pass == 0)
                    store.forceMajorCompaction();
            }
        }
    }
    if (runSStableScrub) {
        // Allow the standalone tool to re-initialize inside this test JVM.
        System.setProperty(org.apache.cassandra.tools.Util.ALLOW_TOOL_REINIT_FOR_TEST, "true");
        try {
            ToolResult tool = reinsertOverflowedTTL
                            ? ToolRunner.invokeClass(StandaloneScrubber.class, "-r", KEYSPACE, store.name)
                            : ToolRunner.invokeClass(StandaloneScrubber.class, KEYSPACE, store.name);
            tool.assertOnCleanExit();
            Assertions.assertThat(tool.getStdout()).contains("Pre-scrub sstables snapshotted into");
            String expectedMessage = reinsertOverflowedTTL
                                   ? "Fixed 2 rows with overflowed local deletion time."
                                   : "No valid partitions found while scrubbing";
            Assertions.assertThat(tool.getStdout()).contains(expectedMessage);
        } finally {
            System.clearProperty(org.apache.cassandra.tools.Util.ALLOW_TOOL_REINIT_FOR_TEST);
        }
    }
    try {
        store.truncateBlocking();
        dropTable("DROP TABLE %s");
    } catch (Throwable ignored) {
        // StandaloneScrubber is meant to be run as a tool against a stable environment. In a test
        // environment the async CQLTester.afterTest() operations race with us, so we attempt a
        // synchronous cleanup here and deliberately swallow anything the race throws.
        // See CASSANDRA-16546.
    }
}
Also used : Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) ToolResult(org.apache.cassandra.tools.ToolRunner.ToolResult)

Example 32 with Keyspace

use of org.apache.cassandra.db.Keyspace in project cassandra by apache.

The class TTLTest, method testCapExpirationDateOverflowPolicy.

public void testCapExpirationDateOverflowPolicy(boolean simple, boolean clustering, boolean flush) throws Throwable {
    // Build the table under test.
    createTable(simple, clustering);
    // Populate via both the INSERT and UPDATE write paths, all with the maximum TTL.
    if (simple) {
        execute("INSERT INTO %s (k, a) VALUES (?, ?) USING TTL " + MAX_TTL, 2, 2);
        String update = clustering
                      ? "UPDATE %s USING TTL " + MAX_TTL + " SET b = 1 WHERE k = 1 AND a = 1;"
                      : "UPDATE %s USING TTL " + MAX_TTL + " SET a = 1, b = 1 WHERE k = 1;";
        execute(update);
    } else {
        execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?) USING TTL " + MAX_TTL, 2, 2, set("v21", "v22", "v23", "v24"));
        String update = clustering
                      ? "UPDATE  %s USING TTL " + MAX_TTL + " SET b = ? WHERE k = 1 AND a = 1;"
                      : "UPDATE  %s USING TTL " + MAX_TTL + " SET a = 1, b = ? WHERE k = 1;";
        execute(update, set("v11", "v12", "v13", "v14"));
    }
    // Optionally push the writes out of the memtable before verifying.
    Keyspace targetKeyspace = Keyspace.open(keyspace());
    if (flush)
        FBUtilities.waitOnFutures(targetKeyspace.flush());
    // Data must survive the (optional) flush...
    verifyData(simple);
    if (flush) {
        // ...and a major compaction as well.
        targetKeyspace.getColumnFamilyStore(currentTable()).forceMajorCompaction();
        verifyData(simple);
    }
}
Also used : Keyspace(org.apache.cassandra.db.Keyspace)

Example 33 with Keyspace

use of org.apache.cassandra.db.Keyspace in project cassandra by apache.

The class DeleteTest, method isMemtableEmpty.

/**
 * Tells whether the memtables of the current table hold any live data.
 *
 * @return {@code true} if the memtable is empty, {@code false} otherwise.
 */
private boolean isMemtableEmpty() {
    ColumnFamilyStore store = Keyspace.open(KEYSPACE).getColumnFamilyStore(currentTable());
    return store.metric.allMemtablesLiveDataSize.getValue() == 0;
}
Also used : Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore)

Example 34 with Keyspace

use of org.apache.cassandra.db.Keyspace in project cassandra by apache.

The class CassandraStreamReceiver, method sendThroughWritePath.

private void sendThroughWritePath(ColumnFamilyStore cfs, Collection<SSTableReader> readers) {
    final boolean cdcEnabled = hasCDC(cfs);
    final ColumnFilter allColumns = ColumnFilter.all(cfs.metadata());
    for (SSTableReader sstable : readers) {
        Keyspace keyspace = Keyspace.open(sstable.getKeyspaceName());
        // Partitions are throttled to MAX_ROWS_PER_BATCH rows at a time
        // to avoid OOMing and generating heap pressure.
        try (ISSTableScanner scanner = sstable.getScanner();
            CloseableIterator<UnfilteredRowIterator> partitions = ThrottledUnfilteredIterator.throttle(scanner, MAX_ROWS_PER_BATCH)) {
            while (partitions.hasNext()) {
                // MV updates *can* be applied unsafe when the CFS has no CDC, since we flush
                // before the transaction is done. With CDC enabled, however, the updates must
                // go through the CommitLog so they get archived into the cdc_raw folder.
                Mutation mutation = new Mutation(PartitionUpdate.fromIterator(partitions.next(), allColumns));
                keyspace.apply(mutation, cdcEnabled, true, false);
            }
        }
    }
}
Also used : ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Mutation(org.apache.cassandra.db.Mutation)

Example 35 with Keyspace

use of org.apache.cassandra.db.Keyspace in project cassandra by apache.

The class RepairJob, method run.

/**
 * Runs the repair job for {@code desc.keyspace}/{@code desc.columnFamily}.
 *
 * Pipeline, all stages chained on the given {@code taskExecutor}:
 * optionally snapshot every participating endpoint, then request tree
 * validation from each replica (sequentially, DC-aware, or fully in
 * parallel depending on {@code parallelismDegree}), then submit sync
 * tasks from the responses, and finally complete this job with the
 * overall {@code RepairResult} or the failure cause.
 */
@SuppressWarnings("UnstableApiUsage")
public void run() {
    Keyspace ks = Keyspace.open(desc.keyspace);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(desc.columnFamily);
    cfs.metric.repairsStarted.inc();
    // The local node participates alongside the remote endpoints of the common range.
    List<InetAddressAndPort> allEndpoints = new ArrayList<>(session.commonRange.endpoints);
    allEndpoints.add(FBUtilities.getBroadcastAddressAndPort());
    Future<List<TreeResponse>> treeResponses;
    // Create a snapshot at all nodes unless we're using pure parallel repairs
    if (parallelismDegree != RepairParallelism.PARALLEL) {
        Future<List<InetAddressAndPort>> allSnapshotTasks;
        if (session.isIncremental) {
            // consistent repair does its own "snapshotting", so skip straight to validation
            allSnapshotTasks = ImmediateFuture.success(allEndpoints);
        } else {
            // Request snapshot to all replica
            List<Future<InetAddressAndPort>> snapshotTasks = new ArrayList<>(allEndpoints.size());
            for (InetAddressAndPort endpoint : allEndpoints) {
                SnapshotTask snapshotTask = new SnapshotTask(desc, endpoint);
                snapshotTasks.add(snapshotTask);
                taskExecutor.execute(snapshotTask);
            }
            allSnapshotTasks = FutureCombiner.allOf(snapshotTasks);
        }
        // When all snapshot complete, send validation requests
        treeResponses = allSnapshotTasks.flatMap(endpoints -> {
            if (parallelismDegree == RepairParallelism.SEQUENTIAL)
                return sendSequentialValidationRequest(endpoints);
            else
                return sendDCAwareValidationRequest(endpoints);
        }, taskExecutor);
    } else {
        // If not sequential, just send validation request to all replica
        treeResponses = sendValidationRequest(allEndpoints);
    }
    // When all validations complete, submit sync tasks
    // (optimised syncing is used unless this is a pull repair or the session opts out)
    Future<List<SyncStat>> syncResults = treeResponses.flatMap(session.optimiseStreams && !session.pullRepair ? this::optimisedSyncing : this::standardSyncing, taskExecutor);
    // When all sync complete, set the final result
    syncResults.addCallback(new FutureCallback<List<SyncStat>>() {

        public void onSuccess(List<SyncStat> stats) {
            if (!session.previewKind.isPreview()) {
                logger.info("{} {}.{} is fully synced", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
                SystemDistributedKeyspace.successfulRepairJob(session.getId(), desc.keyspace, desc.columnFamily);
            }
            cfs.metric.repairsCompleted.inc();
            trySuccess(new RepairResult(desc, stats));
        }

        /**
         * Snapshot, validation and sync failures are all handled here
         */
        public void onFailure(Throwable t) {
            // Make sure all validation tasks have cleaned up the off-heap Merkle trees they might contain.
            validationTasks.forEach(ValidationTask::abort);
            if (!session.previewKind.isPreview()) {
                logger.warn("{} {}.{} sync failed", session.previewKind.logPrefix(session.getId()), desc.keyspace, desc.columnFamily);
                SystemDistributedKeyspace.failedRepairJob(session.getId(), desc.keyspace, desc.columnFamily, t);
            }
            cfs.metric.repairsCompleted.inc();
            // Unwrap so callers observe the original cause rather than the wrapper type.
            tryFailure(t instanceof NoSuchRepairSessionExceptionWrapper ? ((NoSuchRepairSessionExceptionWrapper) t).wrapped : t);
        }
    }, taskExecutor);
}
Also used : InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) java.util(java.util) MerkleTrees(org.apache.cassandra.utils.MerkleTrees) LoggerFactory(org.slf4j.LoggerFactory) Global.currentTimeMillis(org.apache.cassandra.utils.Clock.Global.currentTimeMillis) Range(org.apache.cassandra.dht.Range) com.google.common.util.concurrent(com.google.common.util.concurrent) Function(java.util.function.Function) SystemDistributedKeyspace(org.apache.cassandra.schema.SystemDistributedKeyspace) PreferedNodeFilter(org.apache.cassandra.repair.asymmetric.PreferedNodeFilter) ActiveRepairService(org.apache.cassandra.service.ActiveRepairService) Token(org.apache.cassandra.dht.Token) DifferenceHolder(org.apache.cassandra.repair.asymmetric.DifferenceHolder) Pair(org.apache.cassandra.utils.Pair) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) Keyspace(org.apache.cassandra.db.Keyspace) ReduceHelper(org.apache.cassandra.repair.asymmetric.ReduceHelper) FutureCombiner(org.apache.cassandra.utils.concurrent.FutureCombiner) Logger(org.slf4j.Logger) FBUtilities(org.apache.cassandra.utils.FBUtilities) AsyncFuture(org.apache.cassandra.utils.concurrent.AsyncFuture) ImmutableMap(com.google.common.collect.ImmutableMap) Predicate(java.util.function.Predicate) Tracing(org.apache.cassandra.tracing.Tracing) Collectors(java.util.stream.Collectors) ExecutorPlus(org.apache.cassandra.concurrent.ExecutorPlus) PreviewKind(org.apache.cassandra.streaming.PreviewKind) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Future(org.apache.cassandra.utils.concurrent.Future) ImmediateFuture(org.apache.cassandra.utils.concurrent.ImmediateFuture) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) HostDifferences(org.apache.cassandra.repair.asymmetric.HostDifferences) InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) 
SystemDistributedKeyspace(org.apache.cassandra.schema.SystemDistributedKeyspace) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) AsyncFuture(org.apache.cassandra.utils.concurrent.AsyncFuture) Future(org.apache.cassandra.utils.concurrent.Future) ImmediateFuture(org.apache.cassandra.utils.concurrent.ImmediateFuture)

Aggregations

Keyspace (org.apache.cassandra.db.Keyspace)163 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)100 Test (org.junit.Test)73 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)66 RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder)29 Token (org.apache.cassandra.dht.Token)28 LifecycleTransaction (org.apache.cassandra.db.lifecycle.LifecycleTransaction)27 ArrayList (java.util.ArrayList)18 DecoratedKey (org.apache.cassandra.db.DecoratedKey)17 ByteBuffer (java.nio.ByteBuffer)13 SystemKeyspace (org.apache.cassandra.db.SystemKeyspace)13 CompactionController (org.apache.cassandra.db.compaction.CompactionController)13 TableMetadata (org.apache.cassandra.schema.TableMetadata)13 CompactionIterator (org.apache.cassandra.db.compaction.CompactionIterator)12 Range (org.apache.cassandra.dht.Range)12 AbstractReplicationStrategy (org.apache.cassandra.locator.AbstractReplicationStrategy)12 InetAddressAndPort (org.apache.cassandra.locator.InetAddressAndPort)12 IOException (java.io.IOException)11 List (java.util.List)11 Map (java.util.Map)11