use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class SSTableLevelResetter method main.
/**
 * @param args "--really-reset" followed by the keyspace and table whose sstable levels should be reset
 */
public static void main(String[] args) {
    PrintStream out = System.out;
    if (args.length == 0) {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstablelevelreset <keyspace> <table>");
        System.exit(1);
    }
    if (!args[0].equals("--really-reset") || args.length != 3) {
        out.println("This command should be run with Cassandra stopped, otherwise you will get very strange behavior");
        out.println("Verify that Cassandra is not running and then execute the command like this:");
        out.println("Usage: sstablelevelreset --really-reset <keyspace> <table>");
        System.exit(1);
    }
    Util.initDatabaseDescriptor();
    // Several non-daemon threads will run from here on, so we have to explicitly call System.exit.
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        String keyspaceName = args[1];
        String columnfamily = args[2];
        // validate columnfamily
        if (Schema.instance.getTableMetadataRef(keyspaceName, columnfamily) == null) {
            System.err.println("ColumnFamily not found: " + keyspaceName + "/" + columnfamily);
            System.exit(1);
        }
        Keyspace keyspace = Keyspace.openWithoutSSTables(keyspaceName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnfamily);
        boolean foundSSTable = false;
        for (Map.Entry<Descriptor, Set<Component>> sstable : cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).list().entrySet()) {
            if (sstable.getValue().contains(Component.STATS)) {
                foundSSTable = true;
                Descriptor descriptor = sstable.getKey();
                StatsMetadata metadata = (StatsMetadata) descriptor.getMetadataSerializer().deserialize(descriptor, MetadataType.STATS);
                if (metadata.sstableLevel > 0) {
                    out.println("Changing level from " + metadata.sstableLevel + " to 0 on " + descriptor.filenameFor(Component.DATA));
                    descriptor.getMetadataSerializer().mutateLevel(descriptor, 0);
                } else {
                    out.println("Skipped " + descriptor.filenameFor(Component.DATA) + " since it is already on level 0");
                }
            }
        }
        if (!foundSSTable) {
            out.println("Found no sstables, did you give the correct keyspace/table?");
        }
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        t.printStackTrace();
        System.exit(1);
    }
    System.exit(0);
}
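For comparison, the same metadata-serializer API can be used read-only. The following is a minimal sketch, not part of the tool above: the helper name printLevels is hypothetical, and it assumes the same imports as the class above. It prints each sstable's current level instead of resetting it.

// Hypothetical read-only companion to the resetter above: report the current
// level of every sstable in the ColumnFamilyStore without mutating anything.
private static void printLevels(ColumnFamilyStore cfs) throws IOException {
    for (Map.Entry<Descriptor, Set<Component>> entry : cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).list().entrySet()) {
        // levels live in the STATS component; skip sstables that lack it
        if (!entry.getValue().contains(Component.STATS))
            continue;
        Descriptor descriptor = entry.getKey();
        StatsMetadata metadata = (StatsMetadata) descriptor.getMetadataSerializer().deserialize(descriptor, MetadataType.STATS);
        System.out.println(descriptor.filenameFor(Component.DATA) + " is on level " + metadata.sstableLevel);
    }
}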
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class StandaloneSplitter method main.
public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    Util.initDatabaseDescriptor();
    try {
        // load keyspace descriptions.
        Schema.instance.loadFromDisk(false);
        String ksName = null;
        String cfName = null;
        Map<Descriptor, Set<Component>> parsedFilenames = new HashMap<>();
        for (String filename : options.filenames) {
            File file = new File(filename);
            if (!file.exists()) {
                System.out.println("Skipping nonexistent file " + file);
                continue;
            }
            Descriptor desc = SSTable.tryDescriptorFromFilename(file);
            if (desc == null) {
                System.out.println("Skipping non-sstable file " + file);
                continue;
            }
            if (ksName == null)
                ksName = desc.ksname;
            else if (!ksName.equals(desc.ksname))
                throw new IllegalArgumentException("All sstables must be part of the same keyspace");
            if (cfName == null)
                cfName = desc.cfname;
            else if (!cfName.equals(desc.cfname))
                throw new IllegalArgumentException("All sstables must be part of the same table");
            Set<Component> components = new HashSet<>(Arrays.asList(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.COMPRESSION_INFO, Component.STATS));
            Iterator<Component> iter = components.iterator();
            while (iter.hasNext()) {
                Component component = iter.next();
                if (!(new File(desc.filenameFor(component)).exists()))
                    iter.remove();
            }
            parsedFilenames.put(desc, components);
        }
        if (ksName == null || cfName == null) {
            System.err.println("No valid sstables to split");
            System.exit(1);
        }
        // Do not load sstables since they might be broken
        Keyspace keyspace = Keyspace.openWithoutSSTables(ksName);
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
        String snapshotName = "pre-split-" + System.currentTimeMillis();
        List<SSTableReader> sstables = new ArrayList<>();
        for (Map.Entry<Descriptor, Set<Component>> fn : parsedFilenames.entrySet()) {
            try {
                SSTableReader sstable = SSTableReader.openNoValidation(fn.getKey(), fn.getValue(), cfs);
                if (!isSSTableLargerEnough(sstable, options.sizeInMB)) {
                    System.out.println(String.format("Skipping %s: its size (%.3f MB) is less than the split size (%d MB)", sstable.getFilename(), ((sstable.onDiskLength() * 1.0d) / 1024L) / 1024L, options.sizeInMB));
                    continue;
                }
                sstables.add(sstable);
                if (options.snapshot) {
                    File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
                    sstable.createLinks(snapshotDirectory.getPath());
                }
            } catch (Exception e) {
                JVMStabilityInspector.inspectThrowable(e);
                System.err.println(String.format("Error loading %s: %s", fn.getKey(), e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
            }
        }
        if (sstables.isEmpty()) {
            System.out.println("No sstables needed splitting.");
            System.exit(0);
        }
        if (options.snapshot)
            System.out.println(String.format("Pre-split sstables snapshotted into snapshot %s", snapshotName));
        for (SSTableReader sstable : sstables) {
            try (LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.UNKNOWN, sstable)) {
                new SSTableSplitter(cfs, transaction, options.sizeInMB).split();
            } catch (Exception e) {
                System.err.println(String.format("Error splitting %s: %s", sstable, e.getMessage()));
                if (options.debug)
                    e.printStackTrace(System.err);
                sstable.selfRef().release();
            }
        }
        CompactionManager.instance.finishCompactionsAndShutdown(5, TimeUnit.MINUTES);
        LifecycleTransaction.waitForDeletions();
        // We need this to stop non-daemonized threads
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
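The size filter isSSTableLargerEnough is defined elsewhere in StandaloneSplitter and is not shown above. A plausible implementation, inferred only from how it is called (onDiskLength in bytes compared against a threshold given in megabytes), would be:

// Sketch of the size filter used above; an assumption based on the call site,
// not the verified upstream body. An sstable only needs splitting when its
// on-disk length exceeds the requested maximum size in MB.
private static boolean isSSTableLargerEnough(SSTableReader sstable, int sizeInMB) {
    return sstable.onDiskLength() > sizeInMB * 1024L * 1024L;
}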
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class CachingBench method testSetup.
public void testSetup(String compactionClass, String compressorClass, DiskAccessMode mode, boolean cacheEnabled) throws Throwable {
    id.set(0);
    compactionTimeNanos = 0;
    ChunkCache.instance.enable(cacheEnabled);
    DatabaseDescriptor.setDiskAccessMode(mode);
    alterTable("ALTER TABLE %s WITH compaction = { 'class' : '" + compactionClass + "' };");
    alterTable("ALTER TABLE %s WITH compression = { 'sstable_compression' : '" + compressorClass + "' };");
    ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
    cfs.disableAutoCompaction();
    long onStartTime = System.currentTimeMillis();
    ExecutorService es = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    List<Future<?>> tasks = new ArrayList<>();
    for (int ti = 0; ti < 1; ++ti) {
        Random rand = new Random(ti);
        tasks.add(es.submit(() -> {
            for (int i = 0; i < ITERS; ++i)
                try {
                    pushData(rand, COUNT);
                    readAndDelete(rand, COUNT / 3);
                } catch (Throwable e) {
                    throw new AssertionError(e);
                }
        }));
    }
    for (Future<?> task : tasks)
        task.get();
    flush();
    long onEndTime = System.currentTimeMillis();
    int startRowCount = countRows(cfs);
    int startTombCount = countTombstoneMarkers(cfs);
    int startRowDeletions = countRowDeletions(cfs);
    int startTableCount = cfs.getLiveSSTables().size();
    long startSize = SSTableReader.getTotalBytes(cfs.getLiveSSTables());
    System.out.println("\nCompression: " + cfs.getCompressionParameters().toString());
    System.out.println("Reader " + cfs.getLiveSSTables().iterator().next().getFileDataInput(0).toString());
    if (cacheEnabled)
        System.out.format("Cache size %s requests %,d hit ratio %f\n", FileUtils.stringifyFileSize(ChunkCache.instance.metrics.size.getValue()), ChunkCache.instance.metrics.requests.getCount(), ChunkCache.instance.metrics.hitRate.getValue());
    else {
        Assert.assertTrue("Chunk cache had requests: " + ChunkCache.instance.metrics.requests.getCount(), ChunkCache.instance.metrics.requests.getCount() < COUNT);
        System.out.println("Cache disabled");
    }
    System.out.print(String.format("Operations completed in %.3fs", (onEndTime - onStartTime) * 1e-3));
    if (!CONCURRENT_COMPACTIONS)
        System.out.println(String.format(", out of which %.3fs for non-concurrent compaction", compactionTimeNanos * 1e-9));
    else
        System.out.println();
    String hashesBefore = getHashes();
    long startTime = System.currentTimeMillis();
    CompactionManager.instance.performMaximal(cfs, true);
    long endTime = System.currentTimeMillis();
    int endRowCount = countRows(cfs);
    int endTombCount = countTombstoneMarkers(cfs);
    int endRowDeletions = countRowDeletions(cfs);
    int endTableCount = cfs.getLiveSSTables().size();
    long endSize = SSTableReader.getTotalBytes(cfs.getLiveSSTables());
    System.out.println(String.format("Major compaction completed in %.3fs", (endTime - startTime) * 1e-3));
    System.out.println(String.format("At start: %,12d tables %12s %,12d rows %,12d deleted rows %,12d tombstone markers", startTableCount, FileUtils.stringifyFileSize(startSize), startRowCount, startRowDeletions, startTombCount));
    System.out.println(String.format("At end:   %,12d tables %12s %,12d rows %,12d deleted rows %,12d tombstone markers", endTableCount, FileUtils.stringifyFileSize(endSize), endRowCount, endRowDeletions, endTombCount));
    String hashesAfter = getHashes();
    Assert.assertEquals(hashesBefore, hashesAfter);
}
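The core measurement pattern here is worth isolating: auto-compaction is disabled up front so the timed major compaction is the only compaction running, and a content hash taken before and after proves the compaction changed layout but not data. A stripped-down sketch of just the timing step, using only calls that appear in the benchmark above (the helper name timeMajorCompactionMillis is our own):

// Hypothetical helper extracting the timed-compaction step from the benchmark.
static long timeMajorCompactionMillis(ColumnFamilyStore cfs) {
    // keep background compactions from polluting the measurement
    cfs.disableAutoCompaction();
    long start = System.currentTimeMillis();
    // force a major compaction, with the same boolean flag as the benchmark above
    CompactionManager.instance.performMaximal(cfs, true);
    return System.currentTimeMillis() - start;
}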
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class StatusLogger method log.
public static void log() {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // everything from o.a.c.concurrent
    logger.info(String.format("%-25s%10s%10s%15s%10s%18s", "Pool Name", "Active", "Pending", "Completed", "Blocked", "All Time Blocked"));
    for (Map.Entry<String, String> tpool : ThreadPoolMetrics.getJmxThreadPools(server).entries()) {
        logger.info(String.format("%-25s%10s%10s%15s%10s%18s%n", tpool.getValue(), ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "ActiveTasks"), ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "PendingTasks"), ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "CompletedTasks"), ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "CurrentlyBlockedTasks"), ThreadPoolMetrics.getJmxMetric(server, tpool.getKey(), tpool.getValue(), "TotalBlockedTasks")));
    }
    // one-offs
    logger.info(String.format("%-25s%10s%10s", "CompactionManager", CompactionManager.instance.getActiveCompactions(), CompactionManager.instance.getPendingTasks()));
    int pendingLargeMessages = 0;
    for (int n : MessagingService.instance().getLargeMessagePendingTasks().values()) {
        pendingLargeMessages += n;
    }
    int pendingSmallMessages = 0;
    for (int n : MessagingService.instance().getSmallMessagePendingTasks().values()) {
        pendingSmallMessages += n;
    }
    logger.info(String.format("%-25s%10s%10s", "MessagingService", "n/a", pendingLargeMessages + "/" + pendingSmallMessages));
    // Global key/row cache information
    AutoSavingCache<KeyCacheKey, RowIndexEntry> keyCache = CacheService.instance.keyCache;
    AutoSavingCache<RowCacheKey, IRowCacheEntry> rowCache = CacheService.instance.rowCache;
    int keyCacheKeysToSave = DatabaseDescriptor.getKeyCacheKeysToSave();
    int rowCacheKeysToSave = DatabaseDescriptor.getRowCacheKeysToSave();
    logger.info(String.format("%-25s%10s%25s%25s", "Cache Type", "Size", "Capacity", "KeysToSave"));
    logger.info(String.format("%-25s%10s%25s%25s", "KeyCache", keyCache.weightedSize(), keyCache.getCapacity(), keyCacheKeysToSave == Integer.MAX_VALUE ? "all" : keyCacheKeysToSave));
    logger.info(String.format("%-25s%10s%25s%25s", "RowCache", rowCache.weightedSize(), rowCache.getCapacity(), rowCacheKeysToSave == Integer.MAX_VALUE ? "all" : rowCacheKeysToSave));
    // per-CF stats
    logger.info(String.format("%-25s%20s", "Table", "Memtable ops,data"));
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        logger.info(String.format("%-25s%20s", cfs.keyspace.getName() + "." + cfs.name, cfs.metric.memtableColumnsCount.getValue() + "," + cfs.metric.memtableLiveDataSize.getValue()));
    }
}
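The final loop shows the general pattern for walking every live table: ColumnFamilyStore.all() yields one ColumnFamilyStore per table, each carrying its own metrics. The same iteration can aggregate rather than log; a minimal sketch (the helper name totalMemtableBytes is our own), using only the metric already read above:

// Hypothetical aggregate: total live memtable data, in bytes, across all tables.
static long totalMemtableBytes() {
    long total = 0;
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
        total += cfs.metric.memtableLiveDataSize.getValue();
    return total;
}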
use of org.apache.cassandra.db.ColumnFamilyStore in project cassandra by apache.
the class BatchlogManagerTest method testDelete.
@Test
public void testDelete() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    TableMetadata cfm = cfs.metadata();
    new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234")).clustering("c").add("val", "val" + 1234).build().applyUnsafe();
    DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("1234"));
    ImmutableBTreePartition results = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
    Iterator<Row> iter = results.iterator();
    assert iter.hasNext();
    Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(cfm, dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds()));
    mutation.applyUnsafe();
    Util.assertEmpty(Util.cmd(cfs, dk).build());
}
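The test is an instance of a write/read/delete/verify pattern against a ColumnFamilyStore. A compressed restatement as a reusable sketch: the helper name writeDeleteVerify is hypothetical, and it assumes the same fixtures as above (a table with clustering column "c" and text column "val", java.nio.ByteBuffer imported, and the test Util helpers on the classpath):

// Hypothetical helper restating the pattern above: write a row, verify the
// partition is readable, delete the whole partition, verify the read is empty.
private static void writeDeleteVerify(ColumnFamilyStore cfs, String key) {
    TableMetadata cfm = cfs.metadata();
    ByteBuffer keyBytes = ByteBufferUtil.bytes(key);
    // write one row under the given partition key
    new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), keyBytes).clustering("c").add("val", "some value").build().applyUnsafe();
    DecoratedKey dk = cfs.decorateKey(keyBytes);
    // throws if the partition is absent, so this doubles as the presence check
    Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
    // delete the entire partition, then confirm nothing is readable
    new Mutation(PartitionUpdate.fullPartitionDelete(cfm, dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds())).applyUnsafe();
    Util.assertEmpty(Util.cmd(cfs, dk).build());
}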