use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.
the class SSTableMetadataViewer method main.
/**
* @param args a list of sstables whose metadata we're interested in
*/
public static void main(String[] args) throws IOException {
    PrintStream out = System.out;
    Option optGcgs = new Option(null, GCGS_KEY, true, "The " + GCGS_KEY + " to use when calculating droppable tombstones");
    Options options = new Options();
    options.addOption(optGcgs);
    CommandLine cmd = null;
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        // printHelp exits the process, so cmd cannot be null past this point
        printHelp(options, out);
    }
    if (cmd.getArgs().length == 0) {
        printHelp(options, out);
    }
    int gcgs = Integer.parseInt(cmd.getOptionValue(GCGS_KEY, "0"));
    Util.initDatabaseDescriptor();
    for (String fname : cmd.getArgs()) {
        if (new File(fname).exists()) {
            Descriptor descriptor = Descriptor.fromFilename(fname);
            Map<MetadataType, MetadataComponent> metadata = descriptor.getMetadataSerializer().deserialize(descriptor, EnumSet.allOf(MetadataType.class));
            ValidationMetadata validation = (ValidationMetadata) metadata.get(MetadataType.VALIDATION);
            StatsMetadata stats = (StatsMetadata) metadata.get(MetadataType.STATS);
            CompactionMetadata compaction = (CompactionMetadata) metadata.get(MetadataType.COMPACTION);
            CompressionMetadata compression = null;
            File compressionFile = new File(descriptor.filenameFor(Component.COMPRESSION_INFO));
            if (compressionFile.exists())
                compression = CompressionMetadata.create(fname);
            SerializationHeader.Component header = (SerializationHeader.Component) metadata.get(MetadataType.HEADER);
            out.printf("SSTable: %s%n", descriptor);
            if (validation != null) {
                out.printf("Partitioner: %s%n", validation.partitioner);
                out.printf("Bloom Filter FP chance: %f%n", validation.bloomFilterFPChance);
            }
            if (stats != null) {
                out.printf("Minimum timestamp: %s%n", stats.minTimestamp);
                out.printf("Maximum timestamp: %s%n", stats.maxTimestamp);
                out.printf("SSTable min local deletion time: %s%n", stats.minLocalDeletionTime);
                out.printf("SSTable max local deletion time: %s%n", stats.maxLocalDeletionTime);
                out.printf("Compressor: %s%n", compression != null ? compression.compressor().getClass().getName() : "-");
                if (compression != null)
                    out.printf("Compression ratio: %s%n", stats.compressionRatio);
                out.printf("TTL min: %s%n", stats.minTTL);
                out.printf("TTL max: %s%n", stats.maxTTL);
                if (validation != null && header != null)
                    printMinMaxToken(descriptor, FBUtilities.newPartitioner(descriptor), header.getKeyType(), out);
                if (header != null && header.getClusteringTypes().size() == stats.minClusteringValues.size()) {
                    List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
                    List<ByteBuffer> minClusteringValues = stats.minClusteringValues;
                    List<ByteBuffer> maxClusteringValues = stats.maxClusteringValues;
                    String[] minValues = new String[clusteringTypes.size()];
                    String[] maxValues = new String[clusteringTypes.size()];
                    for (int i = 0; i < clusteringTypes.size(); i++) {
                        minValues[i] = clusteringTypes.get(i).getString(minClusteringValues.get(i));
                        maxValues[i] = clusteringTypes.get(i).getString(maxClusteringValues.get(i));
                    }
out.printf("minClustringValues: %s%n", Arrays.toString(minValues));
out.printf("maxClustringValues: %s%n", Arrays.toString(maxValues));
                }
                out.printf("Estimated droppable tombstones: %s%n", stats.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000) - gcgs));
                out.printf("SSTable Level: %d%n", stats.sstableLevel);
                out.printf("Repaired at: %d%n", stats.repairedAt);
                out.printf("Pending repair: %s%n", stats.pendingRepair);
                out.printf("Replay positions covered: %s%n", stats.commitLogIntervals);
                out.printf("totalColumnsSet: %s%n", stats.totalColumnsSet);
                out.printf("totalRows: %s%n", stats.totalRows);
                out.println("Estimated tombstone drop times:");
                for (Map.Entry<Number, long[]> entry : stats.estimatedTombstoneDropTime.getAsMap().entrySet()) {
                    out.printf("%-10s:%10s%n", entry.getKey().intValue(), entry.getValue()[0]);
                }
                printHistograms(stats, out);
            }
            if (compaction != null) {
                out.printf("Estimated cardinality: %s%n", compaction.cardinalityEstimator.cardinality());
            }
            if (header != null) {
                EncodingStats encodingStats = header.getEncodingStats();
                AbstractType<?> keyType = header.getKeyType();
                List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
                Map<ByteBuffer, AbstractType<?>> staticColumns = header.getStaticColumns();
                Map<String, String> statics = staticColumns.entrySet().stream().collect(Collectors.toMap(e -> UTF8Type.instance.getString(e.getKey()), e -> e.getValue().toString()));
                Map<ByteBuffer, AbstractType<?>> regularColumns = header.getRegularColumns();
                Map<String, String> regulars = regularColumns.entrySet().stream().collect(Collectors.toMap(e -> UTF8Type.instance.getString(e.getKey()), e -> e.getValue().toString()));
                out.printf("EncodingStats minTTL: %s%n", encodingStats.minTTL);
                out.printf("EncodingStats minLocalDeletionTime: %s%n", encodingStats.minLocalDeletionTime);
                out.printf("EncodingStats minTimestamp: %s%n", encodingStats.minTimestamp);
                out.printf("KeyType: %s%n", keyType.toString());
                out.printf("ClusteringTypes: %s%n", clusteringTypes.toString());
                out.printf("StaticColumns: {%s}%n", FBUtilities.toString(statics));
                out.printf("RegularColumns: {%s}%n", FBUtilities.toString(regulars));
            }
        } else {
            out.println("No such file: " + fname);
        }
    }
}
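To see the Descriptor API in isolation, here is a minimal sketch that reads only the STATS component of a single sstable. It assumes the same pre-4.0 tool-mode setup used above (Util is the org.apache.cassandra.tools helper the viewer calls); the class name and argument handling are hypothetical.
import java.util.EnumSet;
import java.util.Map;
import org.apache.cassandra.io.sstable.Descriptor;
import org.apache.cassandra.io.sstable.metadata.MetadataComponent;
import org.apache.cassandra.io.sstable.metadata.MetadataType;
import org.apache.cassandra.io.sstable.metadata.StatsMetadata;
import org.apache.cassandra.tools.Util;

public class PrintTimestampRange {
    public static void main(String[] args) throws Exception {
        // Tool-mode setup, exactly as SSTableMetadataViewer does above.
        Util.initDatabaseDescriptor();
        // Derive keyspace/table/generation/format from the -Data.db file name.
        Descriptor desc = Descriptor.fromFilename(args[0]);
        // Deserialize only the component we need rather than EnumSet.allOf(...).
        Map<MetadataType, MetadataComponent> parts = desc.getMetadataSerializer().deserialize(desc, EnumSet.of(MetadataType.STATS));
        StatsMetadata stats = (StatsMetadata) parts.get(MetadataType.STATS);
        System.out.printf("timestamps: %d .. %d%n", stats.minTimestamp, stats.maxTimestamp);
    }
}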
use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.
the class DirectoriesTest method createFakeSSTable.
private static void createFakeSSTable(File dir, String cf, int gen, List<File> addTo) throws IOException {
    Descriptor desc = new Descriptor(dir, KS, cf, gen, SSTableFormat.Type.BIG);
    for (Component c : new Component[] { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER }) {
        File f = new File(desc.filenameFor(c));
        f.createNewFile();
        addTo.add(f);
    }
}
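A hedged usage sketch for this helper; the directory and generation below are hypothetical, and the exact component file names depend on the current BIG format version.
List<File> created = new ArrayList<>();
createFakeSSTable(new File("/tmp/data/ks/cf"), "cf", 1, created);
// "created" now holds the three component files named by the Descriptor,
// e.g. for format version "mc": mc-1-big-Data.db, mc-1-big-Index.db, mc-1-big-Filter.db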
use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.
the class DirectoriesTest method testSecondaryIndexDirectories.
@Test
public void testSecondaryIndexDirectories() {
    TableMetadata.Builder builder = TableMetadata.builder(KS, "cf").addPartitionKeyColumn("thekey", UTF8Type.instance).addClusteringColumn("col", UTF8Type.instance);
    ColumnIdentifier col = ColumnIdentifier.getInterned("col", true);
    IndexMetadata indexDef = IndexMetadata.fromIndexTargets(Collections.singletonList(new IndexTarget(col, IndexTarget.Type.VALUES)), "idx", IndexMetadata.Kind.KEYS, Collections.emptyMap());
    builder.indexes(Indexes.of(indexDef));
    TableMetadata PARENT_CFM = builder.build();
    TableMetadata INDEX_CFM = CassandraIndex.indexCfsMetadata(PARENT_CFM, indexDef);
    Directories parentDirectories = new Directories(PARENT_CFM);
    Directories indexDirectories = new Directories(INDEX_CFM);
    // secondary index has its own directory
    for (File dir : indexDirectories.getCFDirectories()) {
        assertEquals(cfDir(INDEX_CFM), dir);
    }
    Descriptor parentDesc = new Descriptor(parentDirectories.getDirectoryForNewSSTables(), KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
    Descriptor indexDesc = new Descriptor(indexDirectories.getDirectoryForNewSSTables(), KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
    // snapshot dir should be created under its parent's
    File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, "test");
    File indexSnapshotDirectory = Directories.getSnapshotDirectory(indexDesc, "test");
    assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.getParentFile());
    // check if snapshot directory exists
    parentSnapshotDirectory.mkdirs();
    assertTrue(parentDirectories.snapshotExists("test"));
    assertTrue(indexDirectories.snapshotExists("test"));
    // check their creation time
    assertEquals(parentDirectories.snapshotCreationTime("test"), indexDirectories.snapshotCreationTime("test"));
    // check true snapshot size
    Descriptor parentSnapshot = new Descriptor(parentSnapshotDirectory, KS, PARENT_CFM.name, 0, SSTableFormat.Type.BIG);
    createFile(parentSnapshot.filenameFor(Component.DATA), 30);
    Descriptor indexSnapshot = new Descriptor(indexSnapshotDirectory, KS, INDEX_CFM.name, 0, SSTableFormat.Type.BIG);
    createFile(indexSnapshot.filenameFor(Component.DATA), 40);
    assertEquals(30, parentDirectories.trueSnapshotsSize());
    assertEquals(40, indexDirectories.trueSnapshotsSize());
    // check snapshot details
    Map<String, Pair<Long, Long>> parentSnapshotDetail = parentDirectories.getSnapshotDetails();
    assertTrue(parentSnapshotDetail.containsKey("test"));
    assertEquals(30L, parentSnapshotDetail.get("test").right.longValue());
    Map<String, Pair<Long, Long>> indexSnapshotDetail = indexDirectories.getSnapshotDetails();
    assertTrue(indexSnapshotDetail.containsKey("test"));
    assertEquals(40L, indexSnapshotDetail.get("test").right.longValue());
    // check backup directory
    File parentBackupDirectory = Directories.getBackupsDirectory(parentDesc);
    File indexBackupDirectory = Directories.getBackupsDirectory(indexDesc);
    assertEquals(parentBackupDirectory, indexBackupDirectory.getParentFile());
}
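The parent/child relationships asserted above follow from the secondary index keeping its files inside the parent table's directory. A hedged sketch of the layout (the data root and table-id suffix are placeholders):
// <data>/ks/cf-<tableId>/                      parent table directory
// <data>/ks/cf-<tableId>/.idx/                 index directory, "." + index name
// <data>/ks/cf-<tableId>/snapshots/test/       parent snapshot directory
// <data>/ks/cf-<tableId>/snapshots/test/.idx/  index snapshot, child of the parent's
// <data>/ks/cf-<tableId>/backups/              parent backups directory
// <data>/ks/cf-<tableId>/backups/.idx/         index backups, child of the parent's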
use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.
the class RealTransactionsTest method replaceSSTable.
private SSTableReader replaceSSTable(ColumnFamilyStore cfs, LifecycleTransaction txn, boolean fail) {
    List<SSTableReader> newsstables = null;
    int nowInSec = FBUtilities.nowInSeconds();
    try (CompactionController controller = new CompactionController(cfs, txn.originals(), cfs.gcBefore(FBUtilities.nowInSeconds()))) {
        try (SSTableRewriter rewriter = SSTableRewriter.constructKeepingOriginals(txn, false, 1000);
             AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategyManager().getScanners(txn.originals());
             CompactionIterator ci = new CompactionIterator(txn.opType(), scanners.scanners, controller, nowInSec, txn.opId())) {
            long lastCheckObsoletion = System.nanoTime();
            File directory = txn.originals().iterator().next().descriptor.directory;
            Descriptor desc = cfs.newSSTableDescriptor(directory);
            TableMetadataRef metadata = Schema.instance.getTableMetadataRef(desc);
            rewriter.switchWriter(SSTableWriter.create(metadata, desc, 0, 0, null, 0, SerializationHeader.make(cfs.metadata(), txn.originals()), cfs.indexManager.listIndexes(), txn));
            while (ci.hasNext()) {
                rewriter.append(ci.next());
                if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                    controller.maybeRefreshOverlaps();
                    lastCheckObsoletion = System.nanoTime();
                }
            }
            if (!fail)
                newsstables = rewriter.finish();
            else
                rewriter.abort();
        }
    }
    assertTrue(fail || newsstables != null);
    if (newsstables != null) {
        Assert.assertEquals(1, newsstables.size());
        return newsstables.iterator().next();
    }
    return null;
}
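A hedged sketch of driving this helper; KEYSPACE and TABLE are hypothetical, and tryModify returns null when another operation already holds the sstables, which a caller must guard against.
ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE);
// Lock the current live sstables into a transaction for a compaction-style rewrite.
LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
assertNotNull(txn);
// fail = false takes the rewriter.finish() path and returns the single replacement sstable.
SSTableReader rewritten = replaceSSTable(cfs, txn, false);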
use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.
the class SSTableFlushObserverTest method testFlushObserver.
@Test
public void testFlushObserver() {
    TableMetadata cfm = TableMetadata.builder(KS_NAME, CF_NAME)
                                     .addPartitionKeyColumn("id", UTF8Type.instance)
                                     .addRegularColumn("first_name", UTF8Type.instance)
                                     .addRegularColumn("age", Int32Type.instance)
                                     .addRegularColumn("height", LongType.instance)
                                     .build();
    LifecycleTransaction transaction = LifecycleTransaction.offline(OperationType.COMPACTION);
    FlushObserver observer = new FlushObserver();
    String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0];
    // File.separator, not File.pathSeparator: we are building a path, not a path list
    File directory = new File(sstableDirectory + File.separator + KS_NAME + File.separator + CF_NAME);
    directory.deleteOnExit();
    if (!directory.exists() && !directory.mkdirs())
        throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath());
    SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
    BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(), directory, KS_NAME, CF_NAME, 0, sstableFormat),
                                               10L, 0L, null,
                                               TableMetadataRef.forOfflineTools(cfm),
                                               new MetadataCollector(cfm.comparator).sstableLevel(0),
                                               new SerializationHeader(true, cfm, cfm.regularAndStaticColumns(), EncodingStats.NO_STATS),
                                               Collections.singletonList(observer),
                                               transaction);
    SSTableReader reader = null;
    Multimap<ByteBuffer, Cell> expected = ArrayListMultimap.create();
    try {
        final long now = System.currentTimeMillis();
        ByteBuffer key = UTF8Type.instance.fromString("key1");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(27)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jack")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(183L))));
        writer.append(new RowIterator(cfm, key.duplicate(), Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key2");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("jim")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(180L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        key = UTF8Type.instance.fromString("key3");
        expected.putAll(key, Arrays.asList(BufferCell.live(getColumn(cfm, "age"), now, Int32Type.instance.decompose(30)),
                                           BufferCell.live(getColumn(cfm, "first_name"), now, UTF8Type.instance.fromString("ken")),
                                           BufferCell.live(getColumn(cfm, "height"), now, LongType.instance.decompose(178L))));
        writer.append(new RowIterator(cfm, key, Collections.singletonList(buildRow(expected.get(key)))));
        reader = writer.finish(true);
    } finally {
        FileUtils.closeQuietly(writer);
    }
    Assert.assertTrue(observer.isComplete);
    Assert.assertEquals(expected.size(), observer.rows.size());
    for (Pair<ByteBuffer, Long> e : observer.rows.keySet()) {
        ByteBuffer key = e.left;
        Long indexPosition = e.right;
        try (FileDataInput index = reader.ifile.createReader(indexPosition)) {
            ByteBuffer indexKey = ByteBufferUtil.readWithShortLength(index);
            Assert.assertEquals(0, UTF8Type.instance.compare(key, indexKey));
        } catch (IOException ex) {
            throw new FSReadError(ex, reader.getIndexFilename());
        }
        Assert.assertEquals(expected.get(key), observer.rows.get(e));
    }
}
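The FlushObserver class exercised here is not part of this excerpt; a sketch consistent with the assertions above (rows keyed by partition key plus index position, isComplete set in complete()) could look like this:
private static class FlushObserver implements SSTableFlushObserver {
    final Multimap<Pair<ByteBuffer, Long>, Cell> rows = ArrayListMultimap.create();
    private Pair<ByteBuffer, Long> currentKey;
    boolean isComplete;

    public void begin() {}

    public void startPartition(DecoratedKey key, long indexPosition) {
        // Remember where this partition's index entry starts; the test reads the
        // key back from the index file at exactly this position.
        currentKey = Pair.create(key.getKey(), indexPosition);
    }

    public void nextUnfilteredCluster(Unfiltered unfiltered) {
        // Collect the cells of every row flushed for the current partition.
        if (unfiltered.isRow())
            ((Row) unfiltered).forEach(cd -> rows.put(currentKey, (Cell) cd));
    }

    public void complete() {
        isComplete = true;
    }
}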