Use of org.apache.cassandra.io.sstable.Descriptor in the project eiger by wlloyd.
From the class StreamIn, method getContextMapping.
/**
 * Maps a remote PendingFile onto a local one by allocating a fresh local
 * sstable descriptor for each incoming remote sstable.
 *
 * @param remote the pending file advertised by the streaming peer
 * @return a PendingFile whose descriptor points at a new local flush path
 * @throws IOException if the local flush path cannot be created
 * @throws UnsupportedOperationException if the remote sstable version cannot be streamed
 */
public static PendingFile getContextMapping(PendingFile remote) throws IOException {
    Descriptor source = remote.desc;
    if (!source.isStreamCompatible()) {
        throw new UnsupportedOperationException(String.format("SSTable %s is not compatible with current version %s", remote.getFilename(), Descriptor.CURRENT_VERSION));
    }
    // Resolve the owning keyspace / column family, then reserve a brand-new
    // local sstable descriptor to receive the streamed data.
    ColumnFamilyStore store = Table.open(source.ksname).getColumnFamilyStore(source.cfname);
    Descriptor target = Descriptor.fromFilename(store.getFlushPath(remote.size, source.version));
    return new PendingFile(target, remote);
}
Use of org.apache.cassandra.io.sstable.Descriptor in the project eiger by wlloyd.
From the class StreamOut, method createPendingFiles.
/**
 * Builds the list of PendingFiles to stream for the given sstables and ranges.
 * Called prior to sending anything.
 *
 * <p>Each sstable arrives here with a reference already acquired; sstables that
 * contain no data for the requested ranges have that reference released and
 * are omitted from the result.
 */
private static List<PendingFile> createPendingFiles(Iterable<SSTableReader> sstables, Collection<Range<Token>> ranges, OperationType type) {
    List<PendingFile> result = new ArrayList<PendingFile>();
    for (SSTableReader reader : sstables) {
        List<Pair<Long, Long>> sections = reader.getPositionsForRanges(ranges);
        if (sections.isEmpty()) {
            // Nothing in this sstable intersects the requested ranges; drop
            // the reference that was taken on it and move on.
            reader.releaseReference();
            continue;
        }
        Descriptor descriptor = reader.descriptor;
        result.add(new PendingFile(reader, descriptor, SSTable.COMPONENT_DATA, sections, type, reader.estimatedKeysForRanges(ranges)));
    }
    logger.info("Stream context metadata {}, {} sstables.", result, Iterables.size(sstables));
    return result;
}
Use of org.apache.cassandra.io.sstable.Descriptor in the project eiger by wlloyd.
From the class ColumnFamilyStoreTest, method testBackupAfterFlush.
@Test
public void testBackupAfterFlush() throws Throwable {
    ColumnFamilyStore cfs = insertKey1Key2();
    // Two flushes occurred, so generations 1 and 2 should both have been
    // copied into the backups directory with all of their components.
    Component[] expected = { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.STATS };
    for (int generation = 1; generation <= 2; generation++) {
        Descriptor flushed = new Descriptor(cfs.directories.getDirectoryForNewSSTables(1), "Keyspace2", "Standard1", generation, false);
        Descriptor backup = new Descriptor(Directories.getBackupsDirectory(flushed), "Keyspace2", "Standard1", generation, false);
        for (Component component : expected) {
            String path = backup.filenameFor(component);
            assertTrue("can not find backedup file:" + path, new File(path).exists());
        }
    }
}
Use of org.apache.cassandra.io.sstable.Descriptor in the project eiger by wlloyd.
From the class ColumnFamilyStore, method scrubDataDirectories.
/**
 * Removes unnecessary files from the cf directory at startup: these include temp files, orphans, zero-length files
 * and compacted sstables. Files that cannot be recognized will be ignored.
 *
 * <p>Also deletes incomplete saved-cache temp files and recurses into any
 * secondary-index column families declared in the schema.
 *
 * @param table the keyspace name
 * @param columnFamily the column family whose data directories to scrub
 * @throws IOError if a file slated for deletion cannot be removed
 */
public static void scrubDataDirectories(String table, String columnFamily) {
    logger.debug("Removing compacted SSTable files from {} (see http://wiki.apache.org/cassandra/MemtableSSTable)", columnFamily);
    Directories directories = Directories.create(table, columnFamily);
    for (Map.Entry<Descriptor, Set<Component>> sstableFiles : directories.sstableLister().list().entrySet()) {
        Descriptor desc = sstableFiles.getKey();
        Set<Component> components = sstableFiles.getValue();
        // Compacted or temporary sstables are safe to delete wholesale.
        if (components.contains(Component.COMPACTED_MARKER) || desc.temporary) {
            try {
                SSTable.delete(desc, components);
            } catch (IOException e) {
                throw new IOError(e);
            }
            continue;
        }
        File dataFile = new File(desc.filenameFor(Component.DATA));
        if (components.contains(Component.DATA) && dataFile.length() > 0) {
            // everything appears to be in order... moving on.
            continue;
        }
        // missing the DATA file! all components are orphaned
        logger.warn("Removing orphans for {}: {}", desc, components);
        for (Component component : components) {
            try {
                FileUtils.deleteWithConfirm(desc.filenameFor(component));
            } catch (IOException e) {
                throw new IOError(e);
            }
        }
    }
    // cleanup incomplete saved caches
    Pattern tmpCacheFilePattern = Pattern.compile(table + "-" + columnFamily + "-(Key|Row)Cache.*\\.tmp$");
    File dir = new File(DatabaseDescriptor.getSavedCachesLocation());
    if (dir.exists()) {
        assert dir.isDirectory();
        // listFiles() can return null on an I/O error even after the
        // exists()/isDirectory() checks above; guard against NPE.
        File[] cacheFiles = dir.listFiles();
        if (cacheFiles != null) {
            for (File file : cacheFiles) {
                if (tmpCacheFilePattern.matcher(file.getName()).matches() && !file.delete()) {
                    logger.warn("could not delete " + file.getAbsolutePath());
                }
            }
        }
    }
    // also clean out any index leftovers.
    CFMetaData cfm = Schema.instance.getCFMetaData(table, columnFamily);
    if (cfm != null) {
        // secondary indexes aren't stored in DD.
        for (ColumnDefinition def : cfm.getColumn_metadata().values()) {
            scrubDataDirectories(table, cfm.indexColumnFamilyName(def));
        }
    }
}
Use of org.apache.cassandra.io.sstable.Descriptor in the project eiger by wlloyd.
From the class ColumnFamilyStore, method createColumnFamilyStore.
/**
 * Creates the ColumnFamilyStore for {@code columnFamily}, seeding its sstable
 * generation counter from the highest generation found on disk (including
 * backups) so that newly written sstables never collide with existing files.
 *
 * @param table the owning keyspace
 * @param columnFamily the column family name
 * @param partitioner partitioner used by this store
 * @param metadata schema metadata for the column family
 * @return the newly constructed store
 * @throws RuntimeException if any on-disk sstable has an incompatible version
 */
public static synchronized ColumnFamilyStore createColumnFamilyStore(Table table, String columnFamily, IPartitioner partitioner, CFMetaData metadata) {
    // get the max generation number, to prevent generation conflicts
    Directories directories = Directories.create(table.name, columnFamily);
    Directories.SSTableLister lister = directories.sstableLister().includeBackups(true);
    List<Integer> generations = new ArrayList<Integer>();
    for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
        Descriptor desc = entry.getKey();
        generations.add(desc.generation);
        if (!desc.isCompatible())
            throw new RuntimeException(String.format("Can't open incompatible SSTable! Current version %s, found file: %s", Descriptor.CURRENT_VERSION, desc));
    }
    // Collections.max is O(n); no need to sort the whole list just to find
    // the largest generation. Empty list (no sstables yet) maps to 0.
    int maxGeneration = generations.isEmpty() ? 0 : Collections.max(generations);
    return new ColumnFamilyStore(table, columnFamily, partitioner, maxGeneration, metadata, directories);
}
Aggregations