Use of org.apache.cassandra.io.sstable.Component in project cassandra by apache.
From the class CassandraStreamHeaderTest, the method serializerTest_EntireSSTableTransfer:
@Test
public void serializerTest_EntireSSTableTransfer() {
    String ddl = "CREATE TABLE tbl (k INT PRIMARY KEY, v INT)";
    TableMetadata metadata = CreateTableStatement.parse(ddl, "ks").build();
    ComponentManifest manifest = new ComponentManifest(new LinkedHashMap<Component, Long>() {
        {
            put(Component.DATA, 100L);
        }
    });
    CassandraStreamHeader header = CassandraStreamHeader.builder()
                                                        .withSSTableFormat(SSTableFormat.Type.BIG)
                                                        .withSSTableVersion(BigFormat.latestVersion)
                                                        .withSSTableLevel(0)
                                                        .withEstimatedKeys(0)
                                                        .withSections(Collections.emptyList())
                                                        .withSerializationHeader(SerializationHeader.makeWithoutStats(metadata).toComponent())
                                                        .withComponentManifest(manifest)
                                                        .isEntireSSTable(true)
                                                        .withFirstKey(Murmur3Partitioner.instance.decorateKey(ByteBufferUtil.EMPTY_BYTE_BUFFER))
                                                        .withTableId(metadata.id)
                                                        .build();
    SerializationUtils.assertSerializationCycle(header, new TestableCassandraStreamHeaderSerializer());
}
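The assertion helper used above is not shown on this page. As a rough sketch of what a serialization round-trip check does (a generic stand-in, not Cassandra's actual SerializationUtils, which works with its own DataOutputPlus/DataInputPlus types), the idea can be expressed with a hypothetical Serializer interface:

import java.io.*;
import java.util.Objects;

// Hypothetical stand-in for a serializer pair; Cassandra's real serializers
// use DataOutputPlus/DataInputPlus rather than plain streams.
interface Serializer<T> {
    void serialize(T value, DataOutput out) throws IOException;
    T deserialize(DataInput in) throws IOException;
}

final class RoundTrip {
    // Serialize, deserialize, and require equality: the essence of the
    // serialization-cycle assertion invoked by the test above.
    static <T> void assertSerializationCycle(T original, Serializer<T> serializer) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        serializer.serialize(original, new DataOutputStream(bytes));
        T copy = serializer.deserialize(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        if (!Objects.equals(original, copy))
            throw new AssertionError("Round-trip produced a different value: " + copy);
    }
}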
Use of org.apache.cassandra.io.sstable.Component in project cassandra by apache.
From the class ColumnFamilyStore, the method scrubDataDirectories:
/**
* Removes unnecessary files from the cf directory at startup: these include temp files, orphans, zero-length files
* and compacted sstables. Files that cannot be recognized will be ignored.
*/
public static void scrubDataDirectories(TableMetadata metadata) throws StartupException {
    Directories directories = new Directories(metadata);
    Set<File> cleanedDirectories = new HashSet<>();
    // clear ephemeral snapshots that were not properly cleared last session (CASSANDRA-7357)
    clearEphemeralSnapshots(directories);
    directories.removeTemporaryDirectories();
    logger.trace("Removing temporary or obsoleted files from unfinished operations for table {}", metadata.name);
    if (!LifecycleTransaction.removeUnfinishedLeftovers(metadata))
        throw new StartupException(StartupException.ERR_WRONG_DISK_STATE,
                                   String.format("Cannot remove temporary or obsoleted files for %s due to a problem with transaction " +
                                                 "log files. Please check records with problems in the log messages above and fix them. " +
                                                 "Refer to the 3.0 upgrading instructions in NEWS.txt " +
                                                 "for a description of transaction log files.", metadata.toString()));
    logger.trace("Further extra check for orphan sstable files for {}", metadata.name);
    for (Map.Entry<Descriptor, Set<Component>> sstableFiles : directories.sstableLister(Directories.OnTxnErr.IGNORE).list().entrySet()) {
        Descriptor desc = sstableFiles.getKey();
        File directory = desc.directory;
        Set<Component> components = sstableFiles.getValue();
        if (!cleanedDirectories.contains(directory)) {
            cleanedDirectories.add(directory);
            for (File tmpFile : desc.getTemporaryFiles()) {
                logger.info("Removing unfinished temporary file {}", tmpFile);
                tmpFile.tryDelete();
            }
        }
        File dataFile = new File(desc.filenameFor(Component.DATA));
        if (components.contains(Component.DATA) && dataFile.length() > 0)
            // everything appears to be in order... moving on.
            continue;
        // missing the DATA file! all components are orphaned
        logger.warn("Removing orphans for {}: {}", desc, components);
        for (Component component : components) {
            File file = new File(desc.filenameFor(component));
            if (file.exists())
                FileUtils.deleteWithConfirm(desc.filenameFor(component));
        }
    }
    // cleanup incomplete saved caches
    Pattern tmpCacheFilePattern = Pattern.compile(metadata.keyspace + '-' + metadata.name + "-(Key|Row)Cache.*\\.tmp$");
    File dir = new File(DatabaseDescriptor.getSavedCachesLocation());
    if (dir.exists()) {
        assert dir.isDirectory();
        for (File file : dir.tryList()) {
            if (tmpCacheFilePattern.matcher(file.name()).matches())
                if (!file.tryDelete())
                    logger.warn("could not delete {}", file.absolutePath());
        }
    }
    // also clean out any index leftovers.
    for (IndexMetadata index : metadata.indexes) {
        if (!index.isCustom()) {
            TableMetadata indexMetadata = CassandraIndex.indexCfsMetadata(metadata, index);
            scrubDataDirectories(indexMetadata);
        }
    }
}
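The orphan check above keys off the presence of a non-empty Data.db component: if a descriptor has no data file, every component belonging to it is treated as orphaned. As a rough, JDK-only sketch of the same idea (not Cassandra's Descriptor/Component machinery; the "-Data.db" suffix and prefix-based grouping are assumptions about the on-disk naming), one could group component files by prefix and flag groups with no data file:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Rough sketch of the orphan scan, using plain java.nio instead of
// Cassandra's Descriptor/Component classes.
final class OrphanScan {
    // Group sstable component files by their common prefix and report the
    // groups that have no Data.db file: every component in such a group is orphaned.
    static Map<String, List<Path>> findOrphanGroups(Path dataDir) throws IOException {
        Map<String, List<Path>> byPrefix;
        try (Stream<Path> files = Files.list(dataDir)) {
            byPrefix = files.filter(p -> p.getFileName().toString().endsWith(".db"))
                            .collect(Collectors.groupingBy(p -> prefixOf(p.getFileName().toString())));
        }
        Map<String, List<Path>> orphans = new LinkedHashMap<>();
        byPrefix.forEach((prefix, components) -> {
            boolean hasData = components.stream().anyMatch(p -> p.getFileName().toString().endsWith("-Data.db"));
            if (!hasData)
                orphans.put(prefix, components);
        });
        return orphans;
    }

    // Everything before the last '-' is treated as the sstable's identity.
    private static String prefixOf(String fileName) {
        int idx = fileName.lastIndexOf('-');
        return idx > 0 ? fileName.substring(0, idx) : fileName;
    }
}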
Use of org.apache.cassandra.io.sstable.Component in project cassandra by apache.
From the class SSTableImporter, the method importNewSSTables:
/**
* Imports sstables from the directories given in options.srcPaths
*
* If import fails in any of the directories, that directory is skipped and the failed directories
* are returned so that the user can re-upload files or remove corrupt files.
*
* If one of the directories in srcPaths is not readable/does not exist, we exit immediately to let
* the user change permissions or similar on the directory.
*
* @param options import options, including the source directories (srcPaths) to load sstables from
* @return list of failed directories
*/
@VisibleForTesting
synchronized List<String> importNewSSTables(Options options) {
    logger.info("Loading new SSTables for {}/{}: {}", cfs.keyspace.getName(), cfs.getTableName(), options);
    List<Pair<Directories.SSTableLister, String>> listers = getSSTableListers(options.srcPaths);
    Set<Descriptor> currentDescriptors = new HashSet<>();
    for (SSTableReader sstable : cfs.getSSTables(SSTableSet.CANONICAL))
        currentDescriptors.add(sstable.descriptor);
    List<String> failedDirectories = new ArrayList<>();
    // verify first to avoid starting to copy sstables to the data directories and then have to abort.
    if (options.verifySSTables || options.verifyTokens) {
        for (Pair<Directories.SSTableLister, String> listerPair : listers) {
            Directories.SSTableLister lister = listerPair.left;
            String dir = listerPair.right;
            for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
                Descriptor descriptor = entry.getKey();
                if (!currentDescriptors.contains(entry.getKey())) {
                    try {
                        verifySSTableForImport(descriptor, entry.getValue(), options.verifyTokens, options.verifySSTables, options.extendedVerify);
                    } catch (Throwable t) {
                        if (dir != null) {
                            logger.error("Failed verifying sstable {} in directory {}", descriptor, dir, t);
                            failedDirectories.add(dir);
                        } else {
                            logger.error("Failed verifying sstable {}", descriptor, t);
                            throw new RuntimeException("Failed verifying sstable " + descriptor, t);
                        }
                        break;
                    }
                }
            }
        }
    }
    Set<SSTableReader> newSSTables = new HashSet<>();
    for (Pair<Directories.SSTableLister, String> listerPair : listers) {
        Directories.SSTableLister lister = listerPair.left;
        String dir = listerPair.right;
        if (failedDirectories.contains(dir))
            continue;
        Set<MovedSSTable> movedSSTables = new HashSet<>();
        Set<SSTableReader> newSSTablesPerDirectory = new HashSet<>();
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            try {
                Descriptor oldDescriptor = entry.getKey();
                if (currentDescriptors.contains(oldDescriptor))
                    continue;
                File targetDir = getTargetDirectory(dir, oldDescriptor, entry.getValue());
                Descriptor newDescriptor = cfs.getUniqueDescriptorFor(entry.getKey(), targetDir);
                maybeMutateMetadata(entry.getKey(), options);
                movedSSTables.add(new MovedSSTable(newDescriptor, entry.getKey(), entry.getValue()));
                SSTableReader sstable = SSTableReader.moveAndOpenSSTable(cfs, entry.getKey(), newDescriptor, entry.getValue(), options.copyData);
                newSSTablesPerDirectory.add(sstable);
            } catch (Throwable t) {
                newSSTablesPerDirectory.forEach(s -> s.selfRef().release());
                if (dir != null) {
                    logger.error("Failed importing sstables in directory {}", dir, t);
                    failedDirectories.add(dir);
                    if (options.copyData) {
                        removeCopiedSSTables(movedSSTables);
                    } else {
                        moveSSTablesBack(movedSSTables);
                    }
                    movedSSTables.clear();
                    newSSTablesPerDirectory.clear();
                    break;
                } else {
                    logger.error("Failed importing sstables from data directory - renamed sstables are: {}", movedSSTables);
                    throw new RuntimeException("Failed importing sstables", t);
                }
            }
        }
        newSSTables.addAll(newSSTablesPerDirectory);
    }
    if (newSSTables.isEmpty()) {
        logger.info("No new SSTables were found for {}/{}", cfs.keyspace.getName(), cfs.getTableName());
        return failedDirectories;
    }
    logger.info("Loading new SSTables and building secondary indexes for {}/{}: {}", cfs.keyspace.getName(), cfs.getTableName(), newSSTables);
    try (Refs<SSTableReader> refs = Refs.ref(newSSTables)) {
        cfs.getTracker().addSSTables(newSSTables);
        for (SSTableReader reader : newSSTables) {
            if (options.invalidateCaches && cfs.isRowCacheEnabled())
                invalidateCachesForSSTable(reader.descriptor);
        }
    }
logger.info("Done loading load new SSTables for {}/{}", cfs.keyspace.getName(), cfs.getTableName());
return failedDirectories;
}
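The error handling above follows a simple pattern: import directory by directory, roll back and record any directory that fails, and keep going so the caller gets the full list of failed directories back. A minimal sketch of that pattern in isolation (importDirectory and rollback are hypothetical placeholders, not Cassandra APIs):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Sketch of per-directory failure isolation: a failing directory is rolled
// back and recorded, and the remaining directories are still processed.
final class PerDirectoryImport {
    static List<String> importAll(List<String> directories,
                                  Consumer<String> importDirectory,
                                  Consumer<String> rollback) {
        List<String> failed = new ArrayList<>();
        for (String dir : directories) {
            try {
                importDirectory.accept(dir);
            } catch (RuntimeException t) {
                rollback.accept(dir);   // undo anything already moved or copied for this directory
                failed.add(dir);        // report it so the operator can fix the files and retry
            }
        }
        return failed;                  // mirrors importNewSSTables returning failedDirectories
    }
}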
Use of org.apache.cassandra.io.sstable.Component in project cassandra by apache.
From the class StandaloneScrubber, the method main:
public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    if (Boolean.getBoolean(Util.ALLOW_TOOL_REINIT_FOR_TEST))
        // Necessary for testing
        DatabaseDescriptor.toolInitialization(false);
    else
        Util.initDatabaseDescriptor();
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        if (Schema.instance.getKeyspaceMetadata(options.keyspaceName) == null)
            throw new IllegalArgumentException(String.format("Unknown keyspace %s", options.keyspaceName));
        // Do not load sstables since they might be broken
        Keyspace keyspace = Keyspace.openWithoutSSTables(options.keyspaceName);
        ColumnFamilyStore cfs = null;
        for (ColumnFamilyStore c : keyspace.getValidColumnFamilies(true, false, options.cfName)) {
            if (c.name.equals(options.cfName)) {
                cfs = c;
                break;
            }
        }
        if (cfs == null)
            throw new IllegalArgumentException(String.format("Unknown table %s.%s", options.keyspaceName, options.cfName));
        String snapshotName = "pre-scrub-" + currentTimeMillis();
        OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
        Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
        List<Pair<Descriptor, Set<Component>>> listResult = new ArrayList<>();
        // create snapshot
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            Descriptor descriptor = entry.getKey();
            Set<Component> components = entry.getValue();
            if (!components.contains(Component.DATA))
                continue;
            listResult.add(Pair.create(descriptor, components));
            File snapshotDirectory = Directories.getSnapshotDirectory(descriptor, snapshotName);
            SSTableReader.createLinks(descriptor, components, snapshotDirectory.path());
        }
        System.out.println(String.format("Pre-scrub sstables snapshotted into snapshot %s", snapshotName));
        if (options.headerFixMode != Options.HeaderFixMode.OFF) {
            // Run the frozen-UDT checks _before_ the sstables are opened
            List<String> logOutput = new ArrayList<>();
            SSTableHeaderFix.Builder headerFixBuilder = SSTableHeaderFix.builder()
                                                                        .logToList(logOutput)
                                                                        .schemaCallback(() -> Schema.instance::getTableMetadata);
            if (options.headerFixMode == Options.HeaderFixMode.VALIDATE)
                headerFixBuilder = headerFixBuilder.dryRun();
            for (Pair<Descriptor, Set<Component>> p : listResult)
                headerFixBuilder.withPath(Paths.get(p.left.filenameFor(Component.DATA)));
            SSTableHeaderFix headerFix = headerFixBuilder.build();
            try {
                headerFix.execute();
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                if (options.debug)
                    e.printStackTrace(System.err);
            }
            if (headerFix.hasChanges() || headerFix.hasError())
                logOutput.forEach(System.out::println);
            if (headerFix.hasError()) {
                System.err.println("Errors in serialization-header detected, aborting.");
                System.exit(1);
            }
            switch (options.headerFixMode) {
                case VALIDATE_ONLY:
                case FIX_ONLY:
                    System.out.printf("Not continuing with scrub, since '--%s %s' was specified.%n", HEADERFIX_OPTION, options.headerFixMode.asCommandLineOption());
                    System.exit(0);
                case VALIDATE:
                    if (headerFix.hasChanges()) {
                        System.err.printf("Unfixed, but fixable errors in serialization-header detected, aborting. " +
                                          "Use a non-validating mode ('-e %s' or '-e %s') for --%s%n",
                                          Options.HeaderFixMode.FIX.asCommandLineOption(),
                                          Options.HeaderFixMode.FIX_ONLY.asCommandLineOption(),
                                          HEADERFIX_OPTION);
                        System.exit(2);
                    }
                    break;
                case FIX:
                    break;
            }
        }
        List<SSTableReader> sstables = new ArrayList<>();
        // Open sstables
        for (Pair<Descriptor, Set<Component>> pair : listResult) {
            Descriptor descriptor = pair.left;
            Set<Component> components = pair.right;
            if (!components.contains(Component.DATA))
                continue;
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(descriptor, components, cfs);
                sstables.add(sstable);
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error Loading %s: %s", descriptor, e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        if (!options.manifestCheckOnly) {
            for (SSTableReader sstable : sstables) {
                try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.SCRUB, sstable)) {
                    // make sure originals are deleted and avoid NPE if index is missing, CASSANDRA-9591
                    txn.obsoleteOriginals();
                    try (Scrubber scrubber = new Scrubber(cfs, txn, options.skipCorrupted, handler, !options.noValidate, options.reinsertOverflowedTTL)) {
                        scrubber.scrub();
                    } catch (Throwable t) {
                        if (!cfs.rebuildOnFailedScrub(t)) {
                            System.out.println(t.getMessage());
                            throw t;
                        }
                    }
                } catch (Exception e) {
                    System.err.println(String.format("Error scrubbing %s: %s", sstable, e.getMessage()));
                    e.printStackTrace(System.err);
                }
            }
        }
        // Check (and repair) manifests
        checkManifest(cfs.getCompactionStrategyManager(), cfs, sstables);
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        LifecycleTransaction.waitForDeletions();
        // We need that to stop non daemonized threads
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
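Before scrubbing, the tool hard-links every component of each sstable into a pre-scrub snapshot directory so the originals can be recovered if scrubbing goes wrong. A minimal, JDK-only sketch of that mechanism, assuming a flat directory of component files (Cassandra's SSTableReader.createLinks does the real work):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

// Minimal sketch of snapshot-by-hardlink: link every component file into a
// snapshot directory. Hard links cost no extra data space and preserve the
// files even if the originals are later rewritten or deleted by the scrub.
final class SnapshotByHardlink {
    static void snapshot(Path dataDir, Path snapshotDir) throws IOException {
        Files.createDirectories(snapshotDir);
        try (Stream<Path> components = Files.list(dataDir)) {
            for (Path component : (Iterable<Path>) components::iterator) {
                Path link = snapshotDir.resolve(component.getFileName());
                if (Files.isRegularFile(component) && !Files.exists(link))
                    Files.createLink(link, component); // argument order is (link, existing) in java.nio.file.Files
            }
        }
    }
}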
Use of org.apache.cassandra.io.sstable.Component in project cassandra by apache.
From the class StandaloneVerifier, the method main:
public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    initDatabaseDescriptorForTool();
    System.out.println("sstableverify using the following options: " + options);
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        boolean hasFailed = false;
        if (Schema.instance.getTableMetadataRef(options.keyspaceName, options.cfName) == null)
            throw new IllegalArgumentException(String.format("Unknown keyspace/table %s.%s", options.keyspaceName, options.cfName));
        // Do not load sstables since they might be broken
        Keyspace keyspace = Keyspace.openWithoutSSTables(options.keyspaceName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(options.cfName);
        OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
        Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
        List<SSTableReader> sstables = new ArrayList<>();
        // Verify sstables
        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
            Set<Component> components = entry.getValue();
            if (!components.contains(Component.DATA) || !components.contains(Component.PRIMARY_INDEX))
                continue;
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(entry.getKey(), components, cfs);
                sstables.add(sstable);
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error Loading %s: %s", entry.getKey(), e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
                System.exit(1);
            }
        }
        Verifier.Options verifyOptions = Verifier.options()
                                                 .invokeDiskFailurePolicy(false)
                                                 .extendedVerification(options.extended)
                                                 .checkVersion(options.checkVersion)
                                                 .mutateRepairStatus(options.mutateRepairStatus)
                                                 .checkOwnsTokens(!options.tokens.isEmpty())
                                                 .tokenLookup(ignore -> options.tokens)
                                                 .build();
        handler.output("Running verifier with the following options: " + verifyOptions);
        for (SSTableReader sstable : sstables) {
            try (Verifier verifier = new Verifier(cfs, sstable, handler, true, verifyOptions)) {
                verifier.verify();
            } catch (Exception e) {
                handler.warn(String.format("Error verifying %s: %s", sstable, e.getMessage()), e);
                hasFailed = true;
            }
        }
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        // We need that to stop non daemonized threads
        System.exit(hasFailed ? 1 : 0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
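The verification loop keeps going after a failed sstable and only reflects failures in the final exit status. A small sketch of that exit-status pattern in isolation (the verify predicate is a hypothetical stand-in for Verifier.verify, not Cassandra's API):

import java.util.List;
import java.util.function.Predicate;

// Sketch of the verifier's exit-status pattern: every sstable is checked even
// if earlier ones fail, and the process exit code reports whether any failed.
final class VerifyAll {
    static int run(List<String> sstables, Predicate<String> verify) {
        boolean hasFailed = false;
        for (String sstable : sstables) {
            try {
                if (!verify.test(sstable))
                    hasFailed = true;
            } catch (RuntimeException e) {
                System.err.printf("Error verifying %s: %s%n", sstable, e.getMessage());
                hasFailed = true;       // keep going; report the overall result at the end
            }
        }
        return hasFailed ? 1 : 0;       // caller passes this to System.exit
    }
}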