Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class StorageProxy, the method wrapViewBatchResponseHandler:
/**
* Same as performWrites except does not initiate writes (but does perform availability checks).
* Keeps track of ViewWriteMetrics
*/
private static WriteResponseHandlerWrapper wrapViewBatchResponseHandler(Mutation mutation,
                                                                        ConsistencyLevel consistencyLevel,
                                                                        ConsistencyLevel batchConsistencyLevel,
                                                                        ReplicaLayout.ForTokenWrite liveAndDown,
                                                                        AtomicLong baseComplete,
                                                                        WriteType writeType,
                                                                        BatchlogResponseHandler.BatchlogCleanup cleanup,
                                                                        long queryStartNanoTime) {
    Keyspace keyspace = Keyspace.open(mutation.getKeyspaceName());
    ReplicaPlan.ForTokenWrite replicaPlan = ReplicaPlans.forWrite(keyspace, consistencyLevel, liveAndDown, ReplicaPlans.writeAll);
    AbstractReplicationStrategy replicationStrategy = replicaPlan.replicationStrategy();
    AbstractWriteResponseHandler<IMutation> writeHandler = replicationStrategy.getWriteResponseHandler(replicaPlan, () -> {
        long delay = Math.max(0, currentTimeMillis() - baseComplete.get());
        viewWriteMetrics.viewWriteLatency.update(delay, MILLISECONDS);
    }, writeType, queryStartNanoTime);
    BatchlogResponseHandler<IMutation> batchHandler = new ViewWriteMetricsWrapped(writeHandler,
                                                                                  batchConsistencyLevel.blockFor(replicationStrategy),
                                                                                  cleanup,
                                                                                  queryStartNanoTime);
    return new WriteResponseHandlerWrapper(batchHandler, mutation);
}
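The part worth calling out is the completion callback passed to getWriteResponseHandler: when the view write finishes, it records how far behind the base-table write it completed, using the timestamp stored in baseComplete. Below is a minimal, self-contained sketch of that timing pattern using only JDK types; the class and the recordViewWriteLatency helper are illustrative stand-ins, not Cassandra APIs.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public final class ViewWriteLatencyExample {
    // Illustrative stand-in for viewWriteMetrics.viewWriteLatency.update(delay, MILLISECONDS).
    static void recordViewWriteLatency(long delayMillis, TimeUnit unit) {
        System.out.println("view write latency: " + delayMillis + " " + unit);
    }

    public static void main(String[] args) throws InterruptedException {
        // In the real code this is set by the base-write path when the base mutation completes.
        AtomicLong baseComplete = new AtomicLong(System.currentTimeMillis());

        // The callback installed on the write handler: measures how long the view write
        // lagged behind the base write, never reporting a negative delay.
        Runnable onViewWriteComplete = () -> {
            long delay = Math.max(0, System.currentTimeMillis() - baseComplete.get());
            recordViewWriteLatency(delay, TimeUnit.MILLISECONDS);
        };

        Thread.sleep(5); // simulate the view write completing a little later
        onViewWriteComplete.run();
    }
}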
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class CassandraDaemon, the method migrateSystemDataIfNeeded:
/**
* Checks if the data of the local system keyspaces need to be migrated to a different location.
*
* @throws IOException
*/
public void migrateSystemDataIfNeeded() throws IOException {
    // If no specific location has been configured for the local system keyspaces and there is only
    // one data file location, the data cannot live anywhere else and we do not need to do
    // anything. If it is not the case we want to try to migrate the data.
    if (!DatabaseDescriptor.useSpecificLocationForLocalSystemData() && DatabaseDescriptor.getNonLocalSystemKeyspacesDataFileLocations().length <= 1)
        return;
    // We can face several cases:
    // 1) The system data are spread across the data file locations and need to be moved to
    //    the first data location (upgrade to 4.0)
    // 2) The system data are spread across the data file locations and need to be moved to
    //    the system keyspace location configured by the user (upgrade to 4.0)
    // 3) The system data are stored in the first data location and need to be moved to
    //    the system keyspace location configured by the user (system_data_file_directory has been configured)
    Path target = Paths.get(DatabaseDescriptor.getLocalSystemKeyspacesDataFileLocations()[0]);
    String[] nonLocalSystemKeyspacesFileLocations = DatabaseDescriptor.getNonLocalSystemKeyspacesDataFileLocations();
    String[] sources = DatabaseDescriptor.useSpecificLocationForLocalSystemData()
                       ? nonLocalSystemKeyspacesFileLocations
                       : Arrays.copyOfRange(nonLocalSystemKeyspacesFileLocations, 1, nonLocalSystemKeyspacesFileLocations.length);
    for (String source : sources) {
        Path dataFileLocation = Paths.get(source);
        if (!Files.exists(dataFileLocation))
            continue;
        try (Stream<Path> locationChildren = Files.list(dataFileLocation)) {
            Path[] keyspaceDirectories = locationChildren.filter(p -> SchemaConstants.isLocalSystemKeyspace(p.getFileName().toString()))
                                                         .toArray(Path[]::new);
            for (Path keyspaceDirectory : keyspaceDirectories) {
                try (Stream<Path> keyspaceChildren = Files.list(keyspaceDirectory)) {
                    Path[] tableDirectories = keyspaceChildren.filter(Files::isDirectory)
                                                              .filter(p -> !SystemKeyspace.TABLES_SPLIT_ACROSS_MULTIPLE_DISKS.contains(p.getFileName().toString()))
                                                              .toArray(Path[]::new);
                    for (Path tableDirectory : tableDirectories) {
                        FileUtils.moveRecursively(tableDirectory, target.resolve(dataFileLocation.relativize(tableDirectory)));
                    }
                    if (!SchemaConstants.SYSTEM_KEYSPACE_NAME.equals(keyspaceDirectory.getFileName().toString())) {
                        FileUtils.deleteDirectoryIfEmpty(keyspaceDirectory);
                    }
                }
            }
        }
    }
}
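FileUtils.moveRecursively and FileUtils.deleteDirectoryIfEmpty are Cassandra helpers; the relocation pattern itself can be reproduced with plain java.nio.file calls. A minimal sketch follows (class and method names are made up for illustration): it moves every file under a source directory to the matching relative path under a target directory and leaves the emptied source directories in place, which mirrors the behaviour above, where empty keyspace directories are cleaned up in a separate step.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.*;
import java.util.stream.Stream;

public final class MoveTreeExample {
    // Moves every file under 'source' to the same relative path under 'target',
    // creating parent directories as needed. Empty source directories are left behind.
    static void moveTree(Path source, Path target) throws IOException {
        try (Stream<Path> paths = Files.walk(source)) {
            paths.forEach(p -> {
                try {
                    Path dest = target.resolve(source.relativize(p));
                    if (Files.isDirectory(p))
                        Files.createDirectories(dest);
                    else
                        Files.move(p, dest, StandardCopyOption.REPLACE_EXISTING);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical paths, purely for demonstration.
        moveTree(Paths.get("/tmp/old-data/system"), Paths.get("/tmp/new-data/system"));
    }
}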
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class CassandraDaemon, the method setup:
/**
* This is a hook for concrete daemons to initialize themselves suitably.
*
* Subclasses should override this to finish the job (listening on ports, etc.)
*/
protected void setup() {
    FileUtils.setFSErrorHandler(new DefaultFSErrorHandler());
    // The local system keyspaces data may still be spread across the regular data directories
    // (pre-4.0 layout); make sure it has been migrated to its dedicated location before we start deleting
    // snapshots and upgrading system tables.
    try {
        migrateSystemDataIfNeeded();
    } catch (IOException e) {
        exitOrFail(StartupException.ERR_WRONG_DISK_STATE, e.getMessage(), e);
    }
    maybeInitJmx();
    Mx4jTool.maybeLoad();
    ThreadAwareSecurityManager.install();
    logSystemInfo();
    NativeLibrary.tryMlockall();
    CommitLog.instance.start();
    runStartupChecks();
    try {
        SystemKeyspace.snapshotOnVersionChange();
    } catch (IOException e) {
        exitOrFail(StartupException.ERR_WRONG_DISK_STATE, e.getMessage(), e.getCause());
    }
    // We need to persist this as soon as possible after startup checks.
    // This should be the first write to SystemKeyspace (CASSANDRA-11742)
    SystemKeyspace.persistLocalMetadata();
    Thread.setDefaultUncaughtExceptionHandler(JVMStabilityInspector::uncaughtException);
    SystemKeyspaceMigrator40.migrate();
    // Populate token metadata before flushing, for token-aware sstable partitioning (#6696)
    StorageService.instance.populateTokenMetadata();
    try {
        // load schema from disk
        Schema.instance.loadFromDisk();
    } catch (Exception e) {
        logger.error("Error while loading schema: ", e);
        throw e;
    }
    setupVirtualKeyspaces();
    SSTableHeaderFix.fixNonFrozenUDTIfUpgradeFrom30();
    // clean up debris in the rest of the keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        // Skip system as we've already cleaned it
        if (keyspaceName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME))
            continue;
        for (TableMetadata cfm : Schema.instance.getTablesAndViews(keyspaceName)) {
            try {
                ColumnFamilyStore.scrubDataDirectories(cfm);
            } catch (StartupException e) {
                exitOrFail(e.returnCode, e.getMessage(), e.getCause());
            }
        }
    }
    Keyspace.setInitialized();
    // initialize keyspaces
    for (String keyspaceName : Schema.instance.getKeyspaces()) {
        if (logger.isDebugEnabled())
            logger.debug("opening keyspace {}", keyspaceName);
        // disable auto compaction until gossip settles since disk boundaries may be affected by ring layout
        for (ColumnFamilyStore cfs : Keyspace.open(keyspaceName).getColumnFamilyStores()) {
            for (ColumnFamilyStore store : cfs.concatWithIndexes()) {
                store.disableAutoCompaction();
            }
        }
    }
    try {
        loadRowAndKeyCacheAsync().get();
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        logger.warn("Error loading key or row cache", t);
    }
    try {
        GCInspector.register();
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        logger.warn("Unable to start GCInspector (currently only supported on the Sun JVM)");
    }
    // Replay any CommitLogSegments found on disk
    try {
        CommitLog.instance.recoverSegmentsOnDisk();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    // Re-populate token metadata after commit log recover (new peers might be loaded onto system keyspace #10293)
    StorageService.instance.populateTokenMetadata();
    SystemKeyspace.finishStartup();
    // Clean up system.size_estimates entries left lying around from missed keyspace drops (CASSANDRA-14905)
    StorageService.instance.cleanupSizeEstimates();
    // schedule periodic dumps of table size estimates into SystemKeyspace.SIZE_ESTIMATES_CF
    // set cassandra.size_recorder_interval to 0 to disable
    int sizeRecorderInterval = Integer.getInteger("cassandra.size_recorder_interval", 5 * 60);
    if (sizeRecorderInterval > 0)
        ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SizeEstimatesRecorder.instance, 30, sizeRecorderInterval, TimeUnit.SECONDS);
    ActiveRepairService.instance.start();
    // Prepared statements
    QueryProcessor.instance.preloadPreparedStatements();
    // Metrics
    String metricsReporterConfigFile = System.getProperty("cassandra.metricsReporterConfigFile");
    if (metricsReporterConfigFile != null) {
        logger.info("Trying to load metrics-reporter-config from file: {}", metricsReporterConfigFile);
        try {
            // enable metrics provided by metrics-jvm.jar
            CassandraMetricsRegistry.Metrics.register("jvm.buffers", new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer()));
            CassandraMetricsRegistry.Metrics.register("jvm.gc", new GarbageCollectorMetricSet());
            CassandraMetricsRegistry.Metrics.register("jvm.memory", new MemoryUsageGaugeSet());
            CassandraMetricsRegistry.Metrics.register("jvm.fd.usage", new FileDescriptorRatioGauge());
            // initialize metrics-reporter-config from yaml file
            URL resource = CassandraDaemon.class.getClassLoader().getResource(metricsReporterConfigFile);
            if (resource == null) {
                logger.warn("Failed to load metrics-reporter-config, file does not exist: {}", metricsReporterConfigFile);
            } else {
                String reportFileLocation = resource.getFile();
                ReporterConfig.loadFromFile(reportFileLocation).enableAll(CassandraMetricsRegistry.Metrics);
            }
        } catch (Exception e) {
            logger.warn("Failed to load metrics-reporter-config, metric sinks will not be activated", e);
        }
    }
    // start server internals
    StorageService.instance.registerDaemon(this);
    try {
        StorageService.instance.initServer();
    } catch (ConfigurationException e) {
        System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server. See log for stacktrace.");
        exitOrFail(1, "Fatal configuration error", e);
    }
    // Because we are writing to the system_distributed keyspace, this should happen after that is created, which
    // happens in StorageService.instance.initServer()
    Runnable viewRebuild = () -> {
        for (Keyspace keyspace : Keyspace.all()) {
            keyspace.viewManager.buildAllViews();
        }
        logger.debug("Completed submission of build tasks for any materialized views defined at startup");
    };
    ScheduledExecutors.optionalTasks.schedule(viewRebuild, StorageService.RING_DELAY, TimeUnit.MILLISECONDS);
    if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
        Gossiper.waitToSettle();
    StorageService.instance.doAuthSetup(false);
    // re-enable auto-compaction after gossip is settled, so correct disk boundaries are used
    for (Keyspace keyspace : Keyspace.all()) {
        for (ColumnFamilyStore cfs : keyspace.getColumnFamilyStores()) {
            for (final ColumnFamilyStore store : cfs.concatWithIndexes()) {
                // reload CFs in case there was a change of disk boundaries
                store.reload();
                if (store.getCompactionStrategyManager().shouldBeEnabled()) {
                    if (DatabaseDescriptor.getAutocompactionOnStartupEnabled()) {
                        store.enableAutoCompaction();
                    } else {
                        logger.info("Not enabling compaction for {}.{}; autocompaction_on_startup_enabled is set to false", store.keyspace.getName(), store.name);
                    }
                }
            }
        }
    }
    AuditLogManager.instance.initialize();
    // schedule periodic background compaction task submission. this is simply a backstop against compactions stalling
    // due to scheduling errors or race conditions
    ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(ColumnFamilyStore.getBackgroundCompactionTaskSubmitter(), 5, 1, TimeUnit.MINUTES);
    // schedule periodic recomputation of speculative retry thresholds
    ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(SPECULATION_THRESHOLD_UPDATER, DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS), DatabaseDescriptor.getReadRpcTimeout(NANOSECONDS), NANOSECONDS);
    initializeClientTransports();
    // init below this mark before completeSetup().
    if (DatabaseDescriptor.getAuthCacheWarmingEnabled())
        AuthCacheService.instance.warmCaches();
    else
        logger.info("Prewarming of auth caches is disabled");
    completeSetup();
}
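As the javadoc notes, setup() is a hook that concrete daemons override. Below is a minimal sketch of how a subclass might layer extra initialization on top of this sequence; the subclass name and its helper method are hypothetical and not part of Cassandra.

import org.apache.cassandra.service.CassandraDaemon;

// Hypothetical daemon that adds its own initialization after the standard startup
// sequence shown above has finished (keyspaces opened, commit log replayed, etc.).
public class ReportingCassandraDaemon extends CassandraDaemon {
    @Override
    protected void setup() {
        // Run the full standard startup sequence first.
        super.setup();
        // Then perform daemon-specific work once the node is initialized.
        startCustomReporter();
    }

    private void startCustomReporter() {
        // e.g. register additional metrics sinks or health endpoints here (illustrative only).
    }
}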
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class RangeRelocator, the method calculateToFromStreams:
public void calculateToFromStreams() {
    logger.debug("Current tmd: {}, Updated tmd: {}", tokenMetaClone, tokenMetaCloneAllSettled);
    for (String keyspace : keyspaceNames) {
        // replication strategy of the current keyspace
        AbstractReplicationStrategy strategy = Keyspace.open(keyspace).getReplicationStrategy();
        logger.info("Calculating ranges to stream and request for keyspace {}", keyspace);
        // From what I have seen we only ever call this with a single token from StorageService.move(Token)
        for (Token newToken : tokens) {
            Collection<Token> currentTokens = tokenMetaClone.getTokens(localAddress);
            if (currentTokens.size() > 1 || currentTokens.isEmpty()) {
                throw new AssertionError("Unexpected current tokens: " + currentTokens);
            }
            // calculated parts of the ranges to request/stream from/to nodes in the ring
            Pair<RangesAtEndpoint, RangesAtEndpoint> streamAndFetchOwnRanges;
            // If this node is the only member of its datacenter there is nothing to stream or fetch,
            // so it's easier to just identify this case up front.
            if (tokenMetaClone.getTopology().getDatacenterEndpoints().get(DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter()).size() > 1) {
                // getting collection of the currently used ranges by this keyspace
                RangesAtEndpoint currentReplicas = strategy.getAddressReplicas(localAddress);
                // collection of ranges which this node will serve after move to the new token
                RangesAtEndpoint updatedReplicas = strategy.getPendingAddressRanges(tokenMetaClone, newToken, localAddress);
                streamAndFetchOwnRanges = calculateStreamAndFetchRanges(currentReplicas, updatedReplicas);
            } else {
                streamAndFetchOwnRanges = Pair.create(RangesAtEndpoint.empty(localAddress), RangesAtEndpoint.empty(localAddress));
            }
            RangesByEndpoint rangesToStream = calculateRangesToStreamWithEndpoints(streamAndFetchOwnRanges.left, strategy, tokenMetaClone, tokenMetaCloneAllSettled);
            logger.info("Endpoint ranges to stream to " + rangesToStream);
            // stream ranges
            for (InetAddressAndPort address : rangesToStream.keySet()) {
                logger.debug("Will stream range {} of keyspace {} to endpoint {}", rangesToStream.get(address), keyspace, address);
                RangesAtEndpoint ranges = rangesToStream.get(address);
                streamPlan.transferRanges(address, keyspace, ranges);
            }
            Multimap<InetAddressAndPort, RangeStreamer.FetchReplica> rangesToFetch = calculateRangesToFetchWithPreferredEndpoints(streamAndFetchOwnRanges.right, strategy, keyspace, tokenMetaClone, tokenMetaCloneAllSettled);
            // stream requests
            rangesToFetch.asMap().forEach((address, sourceAndOurReplicas) -> {
                RangesAtEndpoint full = sourceAndOurReplicas.stream().filter(pair -> pair.remote.isFull()).map(pair -> pair.local).collect(RangesAtEndpoint.collector(localAddress));
                RangesAtEndpoint trans = sourceAndOurReplicas.stream().filter(pair -> pair.remote.isTransient()).map(pair -> pair.local).collect(RangesAtEndpoint.collector(localAddress));
                logger.debug("Will request range {} of keyspace {} from endpoint {}", rangesToFetch.get(address), keyspace, address);
                streamPlan.requestRanges(address, keyspace, full, trans);
            });
            logger.debug("Keyspace {}: work map {}.", keyspace, rangesToFetch);
        }
    }
}
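Stripped of the Replica-aware machinery, the core decision is a pair of set differences: stream away the ranges the node owns now but not after the move, and fetch the ranges it will own after the move but does not own now (calculateStreamAndFetchRanges additionally splits overlapping token ranges, which the sketch below ignores). This is a deliberately simplified, self-contained illustration that treats ranges as opaque strings; all names in it are invented.

import java.util.*;

public final class StreamFetchExample {
    // Simplified stand-in for the stream/fetch split: ranges to stream = current \ updated,
    // ranges to fetch = updated \ current. Ranges are plain strings purely for illustration.
    static Map<String, Set<String>> streamAndFetch(Set<String> current, Set<String> updated) {
        Set<String> toStream = new HashSet<>(current);
        toStream.removeAll(updated);

        Set<String> toFetch = new HashSet<>(updated);
        toFetch.removeAll(current);

        Map<String, Set<String>> result = new LinkedHashMap<>();
        result.put("stream", toStream);
        result.put("fetch", toFetch);
        return result;
    }

    public static void main(String[] args) {
        Set<String> current = new HashSet<>(Arrays.asList("(0,100]", "(100,200]"));
        Set<String> updated = new HashSet<>(Arrays.asList("(100,200]", "(200,300]"));
        // Prints {stream=[(0,100]], fetch=[(200,300]]}
        System.out.println(streamAndFetch(current, updated));
    }
}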
Use of org.apache.cassandra.db.Keyspace in project cassandra by apache.
In the class RealTransactionsTest, the method testRewriteAborted:
@Test
public void testRewriteAborted() throws IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(REWRITE_ABORTED_CF);
    SSTableReader oldSSTable = getSSTable(cfs, 1);
    LifecycleTransaction txn = cfs.getTracker().tryModify(oldSSTable, OperationType.COMPACTION);
    replaceSSTable(cfs, txn, true);
    LogTransaction.waitForDeletions();
    assertFiles(oldSSTable.descriptor.directory.path(), new HashSet<>(oldSSTable.getAllFilePaths()));
}
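replaceSSTable and assertFiles are helpers defined elsewhere in RealTransactionsTest and are not shown in this snippet. As a rough, hypothetical stand-in for what the final assertion checks, namely that only the original sstable's files remain on disk after the aborted rewrite, here is a simplified version using plain java.nio.file.

import java.io.IOException;
import java.nio.file.*;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public final class AssertFilesExample {
    // Simplified stand-in for the assertFiles(...) helper: the files currently present
    // in the directory must match the expected set exactly.
    static void assertFiles(String directory, Set<String> expected) throws IOException {
        try (Stream<Path> files = Files.list(Paths.get(directory))) {
            Set<String> actual = files.map(Path::toString).collect(Collectors.toSet());
            if (!actual.equals(expected))
                throw new AssertionError("expected " + expected + " but found " + actual);
        }
    }

    public static void main(String[] args) throws IOException {
        // Tiny self-contained demonstration with a temporary directory and a dummy file name.
        Path dir = Files.createTempDirectory("sstable-dir");
        Path data = Files.createFile(dir.resolve("nb-1-big-Data.db"));
        assertFiles(dir.toString(), new HashSet<>(Collections.singleton(data.toString())));
        System.out.println("only the expected files remain in " + dir);
    }
}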