Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache:
class CompactionManager, method forceUserDefinedCleanup.
public void forceUserDefinedCleanup(String dataFiles) {
    // Resolve each requested data file to its owning table, grouped for cleanup.
    HashMap<ColumnFamilyStore, Descriptor> sstablesByCfs = Maps.newHashMap();
    for (String rawName : dataFiles.split(",")) {
        String trimmed = rawName.trim();
        // The filename encodes the keyspace and table the sstable belongs to.
        Descriptor parsed = Descriptor.fromFilename(trimmed);
        if (Schema.instance.getTableMetadataRef(parsed) == null) {
            logger.warn("Schema does not exist for file {}. Skipping.", rawName);
            continue;
        }
        ColumnFamilyStore cfs = Keyspace.open(parsed.ksname).getColumnFamilyStore(parsed.cfname);
        // Re-resolve through the table's directories to get the canonical descriptor.
        Descriptor located = cfs.getDirectories().find(new File(trimmed).getName());
        if (located != null)
            sstablesByCfs.put(cfs, located);
    }
    // Run a single-sstable cleanup for every resolved table/descriptor pair.
    for (Map.Entry<ColumnFamilyStore, Descriptor> entry : sstablesByCfs.entrySet()) {
        ColumnFamilyStore cfs = entry.getKey();
        Collection<Range<Token>> localRanges = StorageService.instance.getLocalRanges(cfs.keyspace.getName());
        boolean hasIndexes = cfs.indexManager.hasIndexes();
        SSTableReader sstable = lookupSSTable(cfs, entry.getValue());
        if (localRanges.isEmpty()) {
            // Without owned ranges there is nothing meaningful to retain; abort entirely.
            logger.error("Cleanup cannot run before a node has joined the ring");
            return;
        }
        if (sstable == null) {
            logger.warn("Will not clean {}, it is not an active sstable", entry.getValue());
            continue;
        }
        CleanupStrategy strategy = CleanupStrategy.get(cfs, localRanges, FBUtilities.nowInSeconds());
        try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstable, OperationType.CLEANUP)) {
            doCleanupOne(cfs, txn, strategy, localRanges, hasIndexes);
        } catch (IOException e) {
            logger.error("forceUserDefinedCleanup failed: {}", e.getLocalizedMessage());
        }
    }
}
Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache:
class ColumnFamilyStore, method loadNewSSTables.
/**
 * #{@inheritDoc}
 *
 * Scans this table's data directories for sstables that are not yet part of the live
 * set, forces them to level 0, renames them to fresh generation numbers so they cannot
 * collide with existing files, opens them, adds them to the tracker, and builds
 * secondary indexes over them.
 */
public synchronized void loadNewSSTables() {
    logger.info("Loading new SSTables for {}/{}...", keyspace.getName(), name);
    // Snapshot descriptors of sstables already loaded so we can skip them below.
    Set<Descriptor> currentDescriptors = new HashSet<>();
    for (SSTableReader sstable : getSSTables(SSTableSet.CANONICAL)) currentDescriptors.add(sstable.descriptor);
    Set<SSTableReader> newSSTables = new HashSet<>();
    Directories.SSTableLister lister = getDirectories().sstableLister(Directories.OnTxnErr.IGNORE).skipTemporary(true);
    for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
        Descriptor descriptor = entry.getKey();
        if (currentDescriptors.contains(descriptor))
            // old (initialized) SSTable found, skipping
            continue;
        if (!descriptor.isCompatible())
            throw new RuntimeException(String.format("Can't open incompatible SSTable! Current version %s, found file: %s", descriptor.getFormat().getLatestVersion(), descriptor));
        // force foreign sstables to level 0
        try {
            if (new File(descriptor.filenameFor(Component.STATS)).exists())
                descriptor.getMetadataSerializer().mutateLevel(descriptor, 0);
        } catch (IOException e) {
            // Stats mutation failed: skip this sstable rather than load it at a wrong level.
            SSTableReader.logOpenException(entry.getKey(), e);
            continue;
        }
        // Increment the generation until we find a filename that doesn't exist. This is needed because the new
        // SSTables that are being loaded might already use these generation numbers.
        Descriptor newDescriptor;
        do {
            newDescriptor = new Descriptor(descriptor.version, descriptor.directory, descriptor.ksname, descriptor.cfname, fileIndexGenerator.incrementAndGet(), descriptor.formatType);
        } while (new File(newDescriptor.filenameFor(Component.DATA)).exists());
        logger.info("Renaming new SSTable {} to {}", descriptor, newDescriptor);
        SSTableWriter.rename(descriptor, newDescriptor, entry.getValue());
        SSTableReader reader;
        try {
            reader = SSTableReader.open(newDescriptor, entry.getValue(), metadata);
        } catch (IOException e) {
            // Unreadable after rename: log and move on; the file stays on disk under its new name.
            SSTableReader.logOpenException(entry.getKey(), e);
            continue;
        }
        newSSTables.add(reader);
    }
    if (newSSTables.isEmpty()) {
        logger.info("No new SSTables were found for {}/{}", keyspace.getName(), name);
        return;
    }
    logger.info("Loading new SSTables and building secondary indexes for {}/{}: {}", keyspace.getName(), name, newSSTables);
    // Hold references while registering with the tracker and (blocking) index build.
    try (Refs<SSTableReader> refs = Refs.ref(newSSTables)) {
        data.addSSTables(newSSTables);
        indexManager.buildAllIndexesBlocking(newSSTables);
    }
    // Fixed log message: was "Done loading load new SSTables" (duplicated word).
    logger.info("Done loading new SSTables for {}/{}", keyspace.getName(), name);
}
Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache:
class ColumnFamilyStore, method scrubDataDirectories.
/**
 * Startup housekeeping for a table's data directories: removes temp files, orphaned
 * sstable components, zero-length data files and leftovers of unfinished operations,
 * then repeats the process for each non-custom secondary index of the table.
 * Files that cannot be recognized will be ignored.
 */
public static void scrubDataDirectories(TableMetadata metadata) throws StartupException {
    Directories directories = new Directories(metadata, initialDirectories);
    Set<File> alreadyScanned = new HashSet<>();
    // clear ephemeral snapshots that were not properly cleared last session (CASSANDRA-7357)
    clearEphemeralSnapshots(directories);
    directories.removeTemporaryDirectories();
    logger.trace("Removing temporary or obsoleted files from unfinished operations for table {}", metadata.name);
    if (!LifecycleTransaction.removeUnfinishedLeftovers(metadata))
        throw new StartupException(StartupException.ERR_WRONG_DISK_STATE, String.format("Cannot remove temporary or obsoleted files for %s due to a problem with transaction log files. Please check records with problems in the log messages above and fix them. Refer to the 3.0 upgrading instructions in NEWS.txt for a description of transaction log files.", metadata.toString()));
    logger.trace("Further extra check for orphan sstable files for {}", metadata.name);
    for (Map.Entry<Descriptor, Set<Component>> candidate : directories.sstableLister(Directories.OnTxnErr.IGNORE).list().entrySet()) {
        Descriptor descriptor = candidate.getKey();
        Set<Component> components = candidate.getValue();
        // Purge temporary files at most once per data directory (add() is the visit check).
        if (alreadyScanned.add(descriptor.directory)) {
            for (File tmp : descriptor.getTemporaryFiles()) tmp.delete();
        }
        // A present, non-empty DATA component means this sstable is intact: nothing to do.
        if (components.contains(Component.DATA) && new File(descriptor.filenameFor(Component.DATA)).length() > 0)
            continue;
        // missing the DATA file! all components are orphaned
        logger.warn("Removing orphans for {}: {}", descriptor, components);
        for (Component component : components) {
            if (new File(descriptor.filenameFor(component)).exists())
                FileUtils.deleteWithConfirm(descriptor.filenameFor(component));
        }
    }
    // cleanup incomplete saved caches
    Pattern tmpCacheFilePattern = Pattern.compile(metadata.keyspace + "-" + metadata.name + "-(Key|Row)Cache.*\\.tmp$");
    File savedCachesDir = new File(DatabaseDescriptor.getSavedCachesLocation());
    if (savedCachesDir.exists()) {
        assert savedCachesDir.isDirectory();
        for (File cacheFile : savedCachesDir.listFiles()) {
            if (tmpCacheFilePattern.matcher(cacheFile.getName()).matches() && !cacheFile.delete())
                logger.warn("could not delete {}", cacheFile.getAbsolutePath());
        }
    }
    // also clean out any index leftovers.
    for (IndexMetadata index : metadata.indexes) {
        if (!index.isCustom()) {
            TableMetadata indexMetadata = CassandraIndex.indexCfsMetadata(metadata, index);
            scrubDataDirectories(indexMetadata);
        }
    }
}
Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache:
class SSTableExport, method main.
/**
 * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
 *
 * @param args
 * command lines arguments
 * @throws ConfigurationException
 * on configuration failure (wrong params given)
 */
public static void main(String[] args) throws ConfigurationException {
CommandLineParser parser = new PosixParser();
try {
cmd = parser.parse(options, args);
} catch (ParseException e1) {
// Unparseable flags: report, print usage, and exit non-zero.
System.err.println(e1.getMessage());
printUsage();
System.exit(1);
}
if (cmd.getArgs().length != 1) {
// Exactly one positional argument (the sstable path) is required.
System.err.println("You must supply exactly one sstable");
printUsage();
System.exit(1);
}
// Optional key filters: keys to include, and keys to exclude (empty set when absent).
String[] keys = cmd.getOptionValues(KEY_OPTION);
HashSet<String> excludes = new HashSet<>(Arrays.asList(cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null ? new String[0] : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();
if (!new File(ssTableFileName).exists()) {
System.err.println("Cannot find file " + ssTableFileName);
System.exit(1);
}
Descriptor desc = Descriptor.fromFilename(ssTableFileName);
try {
// Offline tool: the table schema is reconstructed from the sstable itself.
TableMetadata metadata = metadataFromSSTable(desc);
if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
// Enumerate-keys mode: emit only partition keys as JSON, without scanning the data.
try (KeyIterator iter = new KeyIterator(desc, metadata)) {
JsonTransformer.keysToJson(null, iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
}
} else {
SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
IPartitioner partitioner = sstable.getPartitioner();
final ISSTableScanner currentScanner;
if ((keys != null) && (keys.length > 0)) {
// Restrict the scan to the token bounds of the requested (non-excluded) keys.
List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys).filter(key -> !excludes.contains(key)).map(metadata.partitionKeyType::fromString).map(partitioner::decorateKey).sorted().map(DecoratedKey::getToken).map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound())).collect(Collectors.toList());
currentScanner = sstable.getScanner(bounds.iterator());
} else {
// No include keys: scan the whole sstable.
currentScanner = sstable.getScanner();
}
// Drop partitions whose key is in the exclude set.
Stream<UnfilteredRowIterator> partitions = iterToStream(currentScanner).filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
// Debug mode: plain-text dump of deletions, static rows and rows, each tagged with
// the scanner position at which the partition/row was observed.
AtomicLong position = new AtomicLong();
partitions.forEach(partition -> {
position.set(currentScanner.getCurrentPosition());
if (!partition.partitionLevelDeletion().isLive()) {
System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
}
if (!partition.staticRow().isEmpty()) {
System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
}
partition.forEachRemaining(row -> {
System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
position.set(currentScanner.getCurrentPosition());
});
});
} else {
// Default mode: stream the selected partitions out as JSON.
JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
}
}
} catch (IOException e) {
// throwing exception outside main with broken pipe causes windows cmd to hang
e.printStackTrace(System.err);
}
System.exit(0);
}
Use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache:
class SSTableOfflineRelevel, method main.
/**
 * Offline relevel entry point: opens every sstable of the given table, groups the
 * readers by data directory, and relevels each group (optionally as a dry run).
 *
 * @param args a list of sstables whose metadata we are changing
 */
public static void main(String[] args) throws IOException {
    PrintStream out = System.out;
    if (args.length < 2) {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstableofflinerelevel [--dry-run] <keyspace> <columnfamily>");
        System.exit(1);
    }
    Util.initDatabaseDescriptor();
    boolean dryRun = args[0].equals("--dry-run");
    // Keyspace and table are always the last two arguments, optionally preceded by --dry-run.
    String keyspaceName = args[args.length - 2];
    String tableName = args[args.length - 1];
    Schema.instance.loadFromDisk(false);
    if (Schema.instance.getTableMetadataRef(keyspaceName, tableName) == null)
        throw new IllegalArgumentException(String.format("Unknown keyspace/columnFamily %s.%s", keyspaceName, tableName));
    Keyspace ks = Keyspace.openWithoutSSTables(keyspaceName);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(tableName);
    Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
    // Relevel operates per data directory, so bucket the opened readers by directory.
    SetMultimap<File, SSTableReader> readersByDirectory = HashMultimap.create();
    for (Map.Entry<Descriptor, Set<Component>> listed : lister.list().entrySet()) {
        Descriptor descriptor = listed.getKey();
        if (descriptor == null)
            continue;
        try {
            SSTableReader reader = SSTableReader.open(descriptor);
            readersByDirectory.put(reader.descriptor.directory, reader);
        } catch (Throwable t) {
            out.println("Couldn't open sstable: " + descriptor.filenameFor(Component.DATA));
            Throwables.propagate(t);
        }
    }
    if (readersByDirectory.isEmpty()) {
        out.println("No sstables to relevel for " + keyspaceName + "." + tableName);
        System.exit(1);
    }
    for (File directory : readersByDirectory.keySet()) {
        Set<SSTableReader> readers = readersByDirectory.get(directory);
        if (!readers.isEmpty()) {
            Relevel rl = new Relevel(readers);
            out.println("For sstables in " + directory + ":");
            rl.relevel(dryRun);
        }
    }
    System.exit(0);
}
Aggregations