Example usage of org.apache.cassandra.db.commitlog.CommitLog in the Apache Cassandra project:
the drain method of the StorageService class.
/**
 * Drains the node: stops accepting client traffic and inter-node messaging, shuts down the
 * write stages, flushes every memtable to disk, and shuts down the commit log so that no
 * commit-log replay is needed on the next start.
 *
 * NOTE(review): the ordering of the shutdown steps below is load-bearing (messaging before
 * mutation stages, compaction shutdown before system-table flush, all flushes before
 * commit-log shutdown). Do not reorder without understanding the inline comments.
 *
 * @param isFinalShutdown true when invoked from the JVM shutdown hook; suppresses the
 *                        progress/mode logging that is pointless during process exit
 * @throws IOException          if the commit log fails to shut down cleanly
 * @throws InterruptedException if interrupted while awaiting executor termination
 * @throws ExecutionException   declared for future-related failures during the drain
 */
protected synchronized void drain(boolean isFinalShutdown) throws IOException, InterruptedException, ExecutionException {
ExecutorService counterMutationStage = StageManager.getStage(Stage.COUNTER_MUTATION);
ExecutorService viewMutationStage = StageManager.getStage(Stage.VIEW_MUTATION);
ExecutorService mutationStage = StageManager.getStage(Stage.MUTATION);
// If all three write stages are already terminated, a previous drain completed; nothing to do.
if (mutationStage.isTerminated() && counterMutationStage.isTerminated() && viewMutationStage.isTerminated()) {
if (!isFinalShutdown)
logger.warn("Cannot drain node (did it already happen?)");
return;
}
assert !isShutdown;
isShutdown = true;
// Run user-registered pre-shutdown hooks; failures are logged (not propagated) so the drain proceeds.
Throwable preShutdownHookThrowable = Throwables.perform(null, preShutdownHooks.stream().map(h -> h::run));
if (preShutdownHookThrowable != null)
logger.error("Attempting to continue draining after pre-shutdown hooks returned exception", preShutdownHookThrowable);
try {
setMode(Mode.DRAINING, "starting drain process", !isFinalShutdown);
// Stop background services that generate writes, and stop accepting new client work.
BatchlogManager.instance.shutdown();
HintsService.instance.pauseDispatch();
if (daemon != null)
shutdownClientServers();
ScheduledExecutors.optionalTasks.shutdown();
Gossiper.instance.stop();
if (!isFinalShutdown)
setMode(Mode.DRAINING, "shutting down MessageService", false);
// In-progress writes originating here could generate hints to be written, so shut down MessagingService
// before mutation stage, so we can get all the hints saved before shutting down
MessagingService.instance().shutdown();
if (!isFinalShutdown)
setMode(Mode.DRAINING, "clearing mutation stage", false);
// Drain the write stages: no new tasks are accepted, and we wait (up to an hour each)
// for in-flight mutations to complete before flushing.
viewMutationStage.shutdown();
counterMutationStage.shutdown();
mutationStage.shutdown();
viewMutationStage.awaitTermination(3600, TimeUnit.SECONDS);
counterMutationStage.awaitTermination(3600, TimeUnit.SECONDS);
mutationStage.awaitTermination(3600, TimeUnit.SECONDS);
StorageProxy.instance.verifyNoHintsInProgress();
if (!isFinalShutdown)
setMode(Mode.DRAINING, "flushing column families", false);
// disable autocompaction - we don't want to start any new compactions while we are draining
for (Keyspace keyspace : Keyspace.all()) for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) cfs.disableAutoCompaction();
// count CFs first, since forceFlush could block for the flushWriter to get a queue slot empty
totalCFs = 0;
for (Keyspace keyspace : Keyspace.nonSystem()) totalCFs += keyspace.getColumnFamilyStores().size();
remainingCFs = totalCFs;
// flush all non-system column families in parallel
List<Future<?>> flushes = new ArrayList<>();
for (Keyspace keyspace : Keyspace.nonSystem()) {
for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) flushes.add(cfs.forceFlush());
}
// wait for the flushes.
// TODO this is a godawful way to track progress, since they flush in parallel. a long one could
// thus make several short ones "instant" if we wait for them later.
for (Future f : flushes) {
try {
FBUtilities.waitOnFuture(f);
} catch (Throwable t) {
JVMStabilityInspector.inspectThrowable(t);
// don't let this stop us from shutting down the commitlog and other thread pools
logger.warn("Caught exception while waiting for memtable flushes during shutdown hook", t);
}
remainingCFs--;
}
// Interrupt ongoing compactions and shutdown CM to prevent further compactions.
CompactionManager.instance.forceShutdown();
// Flush the system tables after all other tables are flushed, just in case flushing modifies any system state
// like CASSANDRA-5151. Don't bother with progress tracking since system data is tiny.
// Flush system tables after stopping compactions since they modify
// system tables (for example compactions can obsolete sstables and the tidiers in SSTableReader update
// system tables, see SSTableReader.GlobalTidy)
flushes.clear();
for (Keyspace keyspace : Keyspace.system()) {
for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) flushes.add(cfs.forceFlush());
}
FBUtilities.waitOnFutures(flushes);
// NOTE(review): shutting down the hints service may itself flush/compact, hence the
// second forceShutdown below — presumably to catch compactions started after the first; confirm upstream.
HintsService.instance.shutdownBlocking();
// Interrupt ongoing compactions and shutdown CM to prevent further compactions.
CompactionManager.instance.forceShutdown();
// whilst we've flushed all the CFs, which will have recycled all completed segments, we want to ensure
// there are no segments to replay, so we force the recycling of any remaining (should be at most one)
CommitLog.instance.forceRecycleAllSegments();
CommitLog.instance.shutdownBlocking();
// wait for miscellaneous tasks like sstable and commitlog segment deletion
ScheduledExecutors.nonPeriodicTasks.shutdown();
if (!ScheduledExecutors.nonPeriodicTasks.awaitTermination(1, TimeUnit.MINUTES))
logger.warn("Failed to wait for non periodic tasks to shutdown");
ColumnFamilyStore.shutdownPostFlushExecutor();
setMode(Mode.DRAINED, !isFinalShutdown);
} catch (Throwable t) {
// Swallow deliberately: a failed drain step must not prevent the post-shutdown hooks from running.
logger.error("Caught an exception while draining ", t);
} finally {
// Always run user-registered post-shutdown hooks, even when the drain itself failed.
Throwable postShutdownHookThrowable = Throwables.perform(null, postShutdownHooks.stream().map(h -> h::run));
if (postShutdownHookThrowable != null)
logger.error("Post-shutdown hooks returned exception", postShutdownHookThrowable);
}
}
Aggregations