
Example 16 with Future

use of org.apache.cassandra.utils.concurrent.Future in project cassandra by apache.

the class CompactionManager method submitMaximal.

// the tasks are executed in parallel on the executor, making sure that they get closed
@SuppressWarnings("resource")
public List<Future<?>> submitMaximal(final ColumnFamilyStore cfStore, final int gcBefore, boolean splitOutput) {
    // here we compute the task off the compaction executor, so having that present doesn't
    // confuse runWithCompactionsDisabled -- i.e., we don't want to deadlock ourselves, waiting
    // for ourselves to finish/acknowledge cancellation before continuing.
    CompactionTasks tasks = cfStore.getCompactionStrategyManager().getMaximalTasks(gcBefore, splitOutput);
    if (tasks.isEmpty())
        return Collections.emptyList();
    List<Future<?>> futures = new ArrayList<>();
    int nonEmptyTasks = 0;
    for (final AbstractCompactionTask task : tasks) {
        if (task.transaction.originals().size() > 0)
            nonEmptyTasks++;
        Runnable runnable = new WrappedRunnable() {

            protected void runMayThrow() {
                task.execute(active);
            }
        };
        Future<?> fut = executor.submitIfRunning(runnable, "maximal task");
        if (!fut.isCancelled())
            futures.add(fut);
    }
    if (nonEmptyTasks > 1)
        logger.info("Major compaction will not result in a single sstable - repaired and unrepaired data is kept separate and compaction runs per data_file_directory.");
    return futures;
}
Also used : Future(org.apache.cassandra.utils.concurrent.Future) ImmediateFuture(org.apache.cassandra.utils.concurrent.ImmediateFuture) RangesAtEndpoint(org.apache.cassandra.locator.RangesAtEndpoint)
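A minimal, self-contained sketch of the same submit-and-collect pattern using only java.util.concurrent; the executor, pool size, and task bodies below are stand-ins, not Cassandra's CompactionExecutor or submitIfRunning.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitMaximalSketch {
    public static void main(String[] args) throws Exception {
        // stand-in for the compaction executor (name and pool size are made up)
        ExecutorService executor = Executors.newFixedThreadPool(2);

        // each Runnable plays the role of one AbstractCompactionTask
        List<Runnable> tasks = List.of(
                () -> System.out.println("compacting sstable set A"),
                () -> System.out.println("compacting sstable set B"));

        List<Future<?>> futures = new ArrayList<>();
        for (Runnable task : tasks) {
            // in Cassandra, submitIfRunning returns an already-cancelled future when the
            // executor is shut down, hence the isCancelled() check mirrored here
            Future<?> fut = executor.submit(task);
            if (!fut.isCancelled())
                futures.add(fut);
        }

        // callers of submitMaximal typically wait on the returned futures
        for (Future<?> f : futures)
            f.get();
        executor.shutdown();
    }
}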

Example 17 with Future

use of org.apache.cassandra.utils.concurrent.Future in project cassandra by apache.

the class CompactionManager method parallelAllSSTableOperation.

/**
 * Run an operation over all sstables using jobs threads
 *
 * @param cfs the column family store to run the operation on
 * @param operation the operation to run
 * @param jobs the number of threads to use - 0 means use all available. It never uses more than concurrent_compactors threads
 * @return status of the operation
 * @throws ExecutionException
 * @throws InterruptedException
 */
@SuppressWarnings("resource")
private AllSSTableOpStatus parallelAllSSTableOperation(final ColumnFamilyStore cfs, final OneSSTableOperation operation, int jobs, OperationType operationType) throws ExecutionException, InterruptedException {
    logger.info("Starting {} for {}.{}", operationType, cfs.keyspace.getName(), cfs.getTableName());
    List<LifecycleTransaction> transactions = new ArrayList<>();
    List<Future<?>> futures = new ArrayList<>();
    try (LifecycleTransaction compacting = cfs.markAllCompacting(operationType)) {
        if (compacting == null)
            return AllSSTableOpStatus.UNABLE_TO_CANCEL;
        Iterable<SSTableReader> sstables = Lists.newArrayList(operation.filterSSTables(compacting));
        if (Iterables.isEmpty(sstables)) {
            logger.info("No sstables to {} for {}.{}", operationType.name(), cfs.keyspace.getName(), cfs.name);
            return AllSSTableOpStatus.SUCCESSFUL;
        }
        for (final SSTableReader sstable : sstables) {
            final LifecycleTransaction txn = compacting.split(singleton(sstable));
            transactions.add(txn);
            Callable<Object> callable = new Callable<Object>() {

                @Override
                public Object call() throws Exception {
                    operation.execute(txn);
                    return this;
                }
            };
            Future<?> fut = executor.submitIfRunning(callable, "paralell sstable operation");
            if (!fut.isCancelled())
                futures.add(fut);
            else
                return AllSSTableOpStatus.ABORTED;
            if (jobs > 0 && futures.size() == jobs) {
                Future<?> f = FBUtilities.waitOnFirstFuture(futures);
                futures.remove(f);
            }
        }
        FBUtilities.waitOnFutures(futures);
        assert compacting.originals().isEmpty();
        logger.info("Finished {} for {}.{} successfully", operationType, cfs.keyspace.getName(), cfs.getTableName());
        return AllSSTableOpStatus.SUCCESSFUL;
    } finally {
        // wait on any unfinished futures to make sure we don't close an ongoing transaction
        try {
            FBUtilities.waitOnFutures(futures);
        } catch (Throwable t) {
        // these are handled/logged in CompactionExecutor#afterExecute
        }
        Throwable fail = Throwables.close(null, transactions);
        if (fail != null)
            logger.error("Failed to cleanup lifecycle transactions ({} for {}.{})", operationType, cfs.keyspace.getName(), cfs.getTableName(), fail);
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) LifecycleTransaction(org.apache.cassandra.db.lifecycle.LifecycleTransaction) ILifecycleTransaction(org.apache.cassandra.db.lifecycle.ILifecycleTransaction) WrappedLifecycleTransaction(org.apache.cassandra.db.lifecycle.WrappedLifecycleTransaction) Future(org.apache.cassandra.utils.concurrent.Future) ImmediateFuture(org.apache.cassandra.utils.concurrent.ImmediateFuture) Callable(java.util.concurrent.Callable)
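The jobs-bounded loop above (submit at most jobs tasks, then wait for one to finish before submitting more) can be sketched with the JDK's ExecutorCompletionService standing in for FBUtilities.waitOnFirstFuture; the operation count and sleep below are made up.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class BoundedParallelismSketch {
    public static void main(String[] args) throws Exception {
        int jobs = 2;                       // like the 'jobs' parameter: max tasks in flight
        int totalOperations = 5;            // stand-in for the number of sstables
        ExecutorService executor = Executors.newFixedThreadPool(jobs);
        CompletionService<Integer> done = new ExecutorCompletionService<>(executor);

        int inFlight = 0;
        for (int i = 0; i < totalOperations; i++) {
            final int id = i;
            done.submit(() -> {             // one task per sstable, like operation.execute(txn)
                Thread.sleep(100);
                return id;
            });
            inFlight++;
            if (inFlight == jobs) {         // same idea as waitOnFirstFuture: block until one finishes
                System.out.println("finished operation " + done.take().get());
                inFlight--;
            }
        }
        while (inFlight-- > 0)              // drain the remainder, like waitOnFutures(futures)
            System.out.println("finished operation " + done.take().get());
        executor.shutdown();
    }
}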

Example 18 with Future

use of org.apache.cassandra.utils.concurrent.Future in project cassandra by apache.

the class RepairJob method sendDCAwareValidationRequest.

/**
 * Creates {@link ValidationTask}s and submits them to the task executor so that tasks run sequentially within each dc.
 */
private Future<List<TreeResponse>> sendDCAwareValidationRequest(Collection<InetAddressAndPort> endpoints) {
    String message = String.format("Requesting merkle trees for %s (to %s)", desc.columnFamily, endpoints);
    logger.info("{} {}", session.previewKind.logPrefix(desc.sessionId), message);
    Tracing.traceRepair(message);
    int nowInSec = getNowInSeconds();
    List<Future<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
    Map<String, Queue<InetAddressAndPort>> requestsByDatacenter = new HashMap<>();
    for (InetAddressAndPort endpoint : endpoints) {
        String dc = DatabaseDescriptor.getEndpointSnitch().getDatacenter(endpoint);
        Queue<InetAddressAndPort> queue = requestsByDatacenter.get(dc);
        if (queue == null) {
            queue = new LinkedList<>();
            requestsByDatacenter.put(dc, queue);
        }
        queue.add(endpoint);
    }
    for (Map.Entry<String, Queue<InetAddressAndPort>> entry : requestsByDatacenter.entrySet()) {
        Queue<InetAddressAndPort> requests = entry.getValue();
        InetAddressAndPort address = requests.poll();
        ValidationTask firstTask = newValidationTask(address, nowInSec);
        logger.info("{} Validating {}", session.previewKind.logPrefix(session.getId()), address);
        session.trackValidationCompletion(Pair.create(desc, address), firstTask);
        tasks.add(firstTask);
        ValidationTask currentTask = firstTask;
        while (requests.size() > 0) {
            final InetAddressAndPort nextAddress = requests.poll();
            final ValidationTask nextTask = newValidationTask(nextAddress, nowInSec);
            tasks.add(nextTask);
            currentTask.addCallback(new FutureCallback<TreeResponse>() {

                public void onSuccess(TreeResponse result) {
                    logger.info("{} Validating {}", session.previewKind.logPrefix(session.getId()), nextAddress);
                    session.trackValidationCompletion(Pair.create(desc, nextAddress), nextTask);
                    taskExecutor.execute(nextTask);
                }

                // failure is handled at root of job chain
                public void onFailure(Throwable t) {
                }
            });
            currentTask = nextTask;
        }
        // start running tasks
        taskExecutor.execute(firstTask);
    }
    return FutureCombiner.allOf(tasks);
}
Also used : InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) AsyncFuture(org.apache.cassandra.utils.concurrent.AsyncFuture) Future(org.apache.cassandra.utils.concurrent.Future) ImmediateFuture(org.apache.cassandra.utils.concurrent.ImmediateFuture) ImmutableMap(com.google.common.collect.ImmutableMap)
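A standalone sketch of the per-datacenter chaining idea: group endpoints by datacenter, run each group's validations one after another, and combine all groups into a single future. CompletableFuture stands in for Cassandra's ValidationTask and FutureCombiner, and the addresses and datacenter names are invented.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class DcAwareChainingSketch {
    public static void main(String[] args) {
        // endpoint -> datacenter, a stand-in for the snitch lookup
        Map<String, String> endpointDc = Map.of(
                "10.0.0.1", "dc1", "10.0.0.2", "dc1",
                "10.1.0.1", "dc2");

        // group endpoints per datacenter, as the HashMap<String, Queue<...>> does above
        Map<String, Queue<String>> byDc = new HashMap<>();
        endpointDc.forEach((ep, dc) -> byDc.computeIfAbsent(dc, k -> new LinkedList<>()).add(ep));

        ExecutorService taskExecutor = Executors.newCachedThreadPool();
        List<CompletableFuture<Void>> chains = new ArrayList<>();
        for (Queue<String> requests : byDc.values()) {
            // chain this dc's "validations" so they run sequentially; different dcs run in parallel
            CompletableFuture<Void> chain = CompletableFuture.completedFuture(null);
            for (String endpoint : requests)
                chain = chain.thenRunAsync(() -> System.out.println("validating " + endpoint), taskExecutor);
            chains.add(chain);
        }
        // combine all per-dc chains into one result, like FutureCombiner.allOf(tasks)
        CompletableFuture.allOf(chains.toArray(new CompletableFuture[0])).join();
        taskExecutor.shutdown();
    }
}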

Example 19 with Future

use of org.apache.cassandra.utils.concurrent.Future in project cassandra by apache.

the class RepairJob method sendSequentialValidationRequest.

/**
 * Creates {@link ValidationTask}s and submits them to the task executor so that tasks run sequentially.
 */
private Future<List<TreeResponse>> sendSequentialValidationRequest(Collection<InetAddressAndPort> endpoints) {
    String message = String.format("Requesting merkle trees for %s (to %s)", desc.columnFamily, endpoints);
    logger.info("{} {}", session.previewKind.logPrefix(desc.sessionId), message);
    Tracing.traceRepair(message);
    int nowInSec = getNowInSeconds();
    List<Future<TreeResponse>> tasks = new ArrayList<>(endpoints.size());
    Queue<InetAddressAndPort> requests = new LinkedList<>(endpoints);
    InetAddressAndPort address = requests.poll();
    ValidationTask firstTask = newValidationTask(address, nowInSec);
    logger.info("{} Validating {}", session.previewKind.logPrefix(desc.sessionId), address);
    session.trackValidationCompletion(Pair.create(desc, address), firstTask);
    tasks.add(firstTask);
    ValidationTask currentTask = firstTask;
    while (requests.size() > 0) {
        final InetAddressAndPort nextAddress = requests.poll();
        final ValidationTask nextTask = newValidationTask(nextAddress, nowInSec);
        tasks.add(nextTask);
        currentTask.addCallback(new FutureCallback<TreeResponse>() {

            public void onSuccess(TreeResponse result) {
                logger.info("{} Validating {}", session.previewKind.logPrefix(desc.sessionId), nextAddress);
                session.trackValidationCompletion(Pair.create(desc, nextAddress), nextTask);
                taskExecutor.execute(nextTask);
            }

            // failure is handled at root of job chain
            public void onFailure(Throwable t) {
            }
        });
        currentTask = nextTask;
    }
    // start running tasks
    taskExecutor.execute(firstTask);
    return FutureCombiner.allOf(tasks);
}
Also used : InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) AsyncFuture(org.apache.cassandra.utils.concurrent.AsyncFuture) Future(org.apache.cassandra.utils.concurrent.Future) ImmediateFuture(org.apache.cassandra.utils.concurrent.ImmediateFuture)
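The fully sequential variant collapses to a single chain. This sketch uses CompletableFuture in place of the callback-linked ValidationTasks; the endpoint addresses are made up.

import java.util.List;
import java.util.concurrent.CompletableFuture;

public class SequentialChainingSketch {
    public static void main(String[] args) {
        List<String> endpoints = List.of("10.0.0.1", "10.0.0.2", "10.0.0.3"); // invented addresses

        // chain one "validation" after another, the same shape as the addCallback loop above
        CompletableFuture<Void> chain = CompletableFuture.completedFuture(null);
        for (String endpoint : endpoints)
            chain = chain.thenRun(() -> System.out.println("validating " + endpoint));

        chain.join(); // completes once every endpoint has been validated, like FutureCombiner.allOf
    }
}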

Example 20 with Future

use of org.apache.cassandra.utils.concurrent.Future in project cassandra by apache.

the class StorageService method drain.

protected synchronized void drain(boolean isFinalShutdown) throws IOException, InterruptedException, ExecutionException {
    ExecutorService counterMutationStage = Stage.COUNTER_MUTATION.executor();
    ExecutorService viewMutationStage = Stage.VIEW_MUTATION.executor();
    ExecutorService mutationStage = Stage.MUTATION.executor();
    if (mutationStage.isTerminated() && counterMutationStage.isTerminated() && viewMutationStage.isTerminated()) {
        if (!isFinalShutdown)
            logger.warn("Cannot drain node (did it already happen?)");
        return;
    }
    assert !isShutdown;
    isShutdown = true;
    Throwable preShutdownHookThrowable = Throwables.perform(null, preShutdownHooks.stream().map(h -> h::run));
    if (preShutdownHookThrowable != null)
        logger.error("Attempting to continue draining after pre-shutdown hooks returned exception", preShutdownHookThrowable);
    try {
        setMode(Mode.DRAINING, "starting drain process", !isFinalShutdown);
        try {
            /* not clear this is reasonable time, but propagated from prior embedded behaviour */
            BatchlogManager.instance.shutdownAndWait(1L, MINUTES);
        } catch (TimeoutException t) {
            logger.error("Batchlog manager timed out shutting down", t);
        }
        snapshotManager.stop();
        HintsService.instance.pauseDispatch();
        if (daemon != null)
            shutdownClientServers();
        ScheduledExecutors.optionalTasks.shutdown();
        Gossiper.instance.stop();
        ActiveRepairService.instance.stop();
        if (!isFinalShutdown)
            setMode(Mode.DRAINING, "shutting down MessageService", false);
        // In-progress writes originating here could generate hints to be written, so shut down MessagingService
        // before mutation stage, so we can get all the hints saved before shutting down.
        try {
            MessagingService.instance().shutdown();
        } catch (Throwable t) {
            // prevent messaging service timing out shutdown from aborting
            // drain process; otherwise drain and/or shutdown might throw
            logger.error("Messaging service timed out shutting down", t);
        }
        if (!isFinalShutdown)
            setMode(Mode.DRAINING, "clearing mutation stage", false);
        viewMutationStage.shutdown();
        counterMutationStage.shutdown();
        mutationStage.shutdown();
        // FIXME? should these *really* take up to one hour?
        viewMutationStage.awaitTermination(3600, TimeUnit.SECONDS);
        counterMutationStage.awaitTermination(3600, TimeUnit.SECONDS);
        mutationStage.awaitTermination(3600, TimeUnit.SECONDS);
        StorageProxy.instance.verifyNoHintsInProgress();
        if (!isFinalShutdown)
            setMode(Mode.DRAINING, "flushing column families", false);
        // we don't want to start any new compactions while we are draining
        disableAutoCompaction();
        // count CFs first, since forceFlush could block for the flushWriter to get a queue slot empty
        totalCFs = 0;
        for (Keyspace keyspace : Keyspace.nonSystem()) totalCFs += keyspace.getColumnFamilyStores().size();
        remainingCFs = totalCFs;
        // flush
        List<Future<?>> flushes = new ArrayList<>();
        for (Keyspace keyspace : Keyspace.nonSystem()) {
            for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) flushes.add(cfs.forceFlush());
        }
        // wait for the flushes; they run in parallel, so a long one could
        // thus make several short ones "instant" if we wait for them later.
        for (Future f : flushes) {
            try {
                FBUtilities.waitOnFuture(f);
            } catch (Throwable t) {
                JVMStabilityInspector.inspectThrowable(t);
                // don't let this stop us from shutting down the commitlog and other thread pools
                logger.warn("Caught exception while waiting for memtable flushes during shutdown hook", t);
            }
            remainingCFs--;
        }
        // Interrupt ongoing compactions and shutdown CM to prevent further compactions.
        CompactionManager.instance.forceShutdown();
        // Flush the system tables after all other tables are flushed, just in case flushing modifies any system state
        // like CASSANDRA-5151. Don't bother with progress tracking since system data is tiny.
        // Flush system tables after stopping compactions since they modify
        // system tables (for example compactions can obsolete sstables and the tidiers in SSTableReader update
        // system tables, see SSTableReader.GlobalTidy)
        flushes.clear();
        for (Keyspace keyspace : Keyspace.system()) {
            for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) flushes.add(cfs.forceFlush());
        }
        FBUtilities.waitOnFutures(flushes);
        SnapshotManager.shutdownAndWait(1L, MINUTES);
        HintsService.instance.shutdownBlocking();
        // Interrupt ongoing compactions and shutdown CM to prevent further compactions.
        CompactionManager.instance.forceShutdown();
        // whilst we've flushed all the CFs, which will have recycled all completed segments, we want to ensure
        // there are no segments to replay, so we force the recycling of any remaining (should be at most one)
        CommitLog.instance.forceRecycleAllSegments();
        CommitLog.instance.shutdownBlocking();
        // wait for miscellaneous tasks like sstable and commitlog segment deletion
        ScheduledExecutors.nonPeriodicTasks.shutdown();
        if (!ScheduledExecutors.nonPeriodicTasks.awaitTermination(1, MINUTES))
            logger.warn("Unable to terminate non-periodic tasks within 1 minute.");
        ColumnFamilyStore.shutdownPostFlushExecutor();
        setMode(Mode.DRAINED, !isFinalShutdown);
    } catch (Throwable t) {
        logger.error("Caught an exception while draining ", t);
    } finally {
        Throwable postShutdownHookThrowable = Throwables.perform(null, postShutdownHooks.stream().map(h -> h::run));
        if (postShutdownHookThrowable != null)
            logger.error("Post-shutdown hooks returned exception", postShutdownHookThrowable);
    }
}
Also used : TraceKeyspace(org.apache.cassandra.tracing.TraceKeyspace) AuthKeyspace(org.apache.cassandra.auth.AuthKeyspace) SystemDistributedKeyspace(org.apache.cassandra.schema.SystemDistributedKeyspace) MigrationManager.evolveSystemKeyspace(org.apache.cassandra.schema.MigrationManager.evolveSystemKeyspace) ExecutorService(java.util.concurrent.ExecutorService) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ImmediateFuture(org.apache.cassandra.utils.concurrent.ImmediateFuture) Future(org.apache.cassandra.utils.concurrent.Future) TimeoutException(java.util.concurrent.TimeoutException)
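The drain sequence boils down to: stop accepting new work, bound the wait for in-flight work, then fire off flush futures and wait on each while tolerating individual failures. A JDK-only sketch of that shape, with invented stage and table names rather than StorageService's actual fields, looks like this:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class DrainSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService mutationStage = Executors.newFixedThreadPool(2); // stand-in for a write stage
        ExecutorService flushExecutor = Executors.newFixedThreadPool(2); // stand-in for memtable flushing

        // stop accepting new writes, then wait (bounded) for in-flight work, as drain() does per stage
        mutationStage.shutdown();
        if (!mutationStage.awaitTermination(60, TimeUnit.SECONDS))
            System.err.println("mutation stage did not terminate in time");

        // kick off one "flush" per table and wait on each, tolerating individual failures
        List<Future<?>> flushes = new ArrayList<>();
        for (String table : List.of("t1", "t2", "t3"))
            flushes.add(flushExecutor.submit(() -> System.out.println("flushed " + table)));
        for (Future<?> f : flushes) {
            try {
                f.get();
            } catch (ExecutionException e) {
                // don't let one failed flush stop the rest of the shutdown
                System.err.println("flush failed: " + e.getCause());
            }
        }
        flushExecutor.shutdown();
    }
}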

Aggregations

Future (org.apache.cassandra.utils.concurrent.Future) 29
ImmediateFuture (org.apache.cassandra.utils.concurrent.ImmediateFuture) 12
InetAddressAndPort (org.apache.cassandra.locator.InetAddressAndPort) 11
Test (org.junit.Test) 8
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 7
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor) 7
ExecutionException (java.util.concurrent.ExecutionException) 6
Collectors (java.util.stream.Collectors) 6
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore) 6
CoordinatedRepairResult (org.apache.cassandra.repair.CoordinatedRepairResult) 6
Logger (org.slf4j.Logger) 6
LoggerFactory (org.slf4j.LoggerFactory) 6
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 5
UUID (java.util.UUID) 5
TimeUnit (java.util.concurrent.TimeUnit) 5
AbstractRepairTest (org.apache.cassandra.repair.AbstractRepairTest) 5
AsyncFuture (org.apache.cassandra.utils.concurrent.AsyncFuture) 5
ByteBuffer (java.nio.ByteBuffer) 4
java.util (java.util) 4
Range (org.apache.cassandra.dht.Range) 4