use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
the class SSTableExport method main.
/**
 * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
 *
 * @param args command line arguments
 * @throws ConfigurationException on configuration failure (wrong params given)
 */
public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    if (cmd.getArgs().length != 1) {
        System.err.println("You must supply exactly one sstable");
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    String[] excludedKeys = cmd.getOptionValues(EXCLUDE_KEY_OPTION);
    HashSet<String> excludes = new HashSet<>(Arrays.asList(excludedKeys == null ? new String[0] : excludedKeys));
    String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();
    if (!new File(ssTableFileName).exists()) {
        System.err.println("Cannot find file " + ssTableFileName);
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
        TableMetadata metadata = metadataFromSSTable(desc);
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            try (KeyIterator iter = new KeyIterator(desc, metadata)) {
                JsonTransformer.keysToJson(null, iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys)
                        .filter(key -> !excludes.contains(key))
                        .map(metadata.partitionKeyType::fromString)
                        .map(partitioner::decorateKey)
                        .sorted()
                        .map(DecoratedKey::getToken)
                        .map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound()))
                        .collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            Stream<UnfilteredRowIterator> partitions = iterToStream(currentScanner)
                    .filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        // throwing an exception out of main on a broken pipe causes the Windows cmd shell to hang
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
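The snippet relies on an iterToStream helper that is not part of this excerpt. A plausible sketch, assuming the helper simply wraps an Iterator in a sequential Stream (the exact body is an assumption, not taken from the source above):

// Hedged sketch of the iterToStream helper used above; assumes it wraps
// an Iterator in a sequential Stream via a spliterator of unknown size.
// Requires java.util.Spliterator, java.util.Spliterators,
// and java.util.stream.StreamSupport.
private static <T> Stream<T> iterToStream(Iterator<T> iter) {
    Spliterator<T> split = Spliterators.spliteratorUnknownSize(iter, Spliterator.IMMUTABLE);
    return StreamSupport.stream(split, false);
}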
use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
the class StandaloneSSTableUtil method main.
public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    try {
        // load keyspace descriptions
        Util.initDatabaseDescriptor();
        Schema.instance.loadFromDisk(false);
        TableMetadata metadata = Schema.instance.getTableMetadata(options.keyspaceName, options.cfName);
        if (metadata == null)
            throw new IllegalArgumentException(String.format("Unknown keyspace/table %s.%s", options.keyspaceName, options.cfName));
        OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
        if (options.cleanup) {
            handler.output("Cleaning up...");
            LifecycleTransaction.removeUnfinishedLeftovers(metadata);
        } else {
            handler.output("Listing files...");
            listFiles(options, metadata, handler);
        }
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
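The null check after Schema.instance.getTableMetadata is the usual defensive pattern for offline tools that must fail fast on an unknown table. A minimal sketch of the same pattern factored into a reusable helper (requireTable is a hypothetical name, not part of StandaloneSSTableUtil):

// Hypothetical helper (not in the tool) capturing the
// lookup-or-fail pattern used above.
static TableMetadata requireTable(String keyspaceName, String tableName) {
    TableMetadata metadata = Schema.instance.getTableMetadata(keyspaceName, tableName);
    if (metadata == null)
        throw new IllegalArgumentException(String.format("Unknown keyspace/table %s.%s", keyspaceName, tableName));
    return metadata;
}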
use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
the class StorageService method getNaturalEndpoints.
/**
 * This method returns the N endpoints that are responsible for storing the
 * specified key, i.e. the replicas for that key.
 *
 * @param keyspaceName the keyspace name
 * @param cf the table (column family) name
 * @param key the key for which we need to find the endpoints
 * @return the endpoints responsible for this key
 */
public List<InetAddress> getNaturalEndpoints(String keyspaceName, String cf, String key) {
    KeyspaceMetadata ksMetaData = Schema.instance.getKeyspaceMetadata(keyspaceName);
    if (ksMetaData == null)
        throw new IllegalArgumentException("Unknown keyspace '" + keyspaceName + "'");
    TableMetadata metadata = ksMetaData.getTableOrViewNullable(cf);
    if (metadata == null)
        throw new IllegalArgumentException("Unknown table '" + cf + "' in keyspace '" + keyspaceName + "'");
    return getNaturalEndpoints(keyspaceName, tokenMetadata.partitioner.getToken(metadata.partitionKeyType.fromString(key)));
}
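A hedged usage sketch: resolving the replica endpoints for one partition key through the running StorageService. The keyspace, table, and key values are placeholders, not taken from the source:

// Hypothetical caller: list the replicas responsible for one key.
List<InetAddress> replicas =
        StorageService.instance.getNaturalEndpoints("my_keyspace", "my_table", "some_key");
for (InetAddress endpoint : replicas)
    System.out.println(endpoint.getHostAddress());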
use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
the class ByteOrderedPartitioner method describeOwnership.
public Map<Token, Float> describeOwnership(List<Token> sortedTokens) {
    // allTokens will hold the counts and is returned; sortedRanges is shorthand for the token<->token math
    Map<Token, Float> allTokens = new HashMap<>();
    List<Range<Token>> sortedRanges = new ArrayList<>(sortedTokens.size());
    // initialize the counts to 0 and calculate the ranges in order
    Token lastToken = sortedTokens.get(sortedTokens.size() - 1);
    for (Token node : sortedTokens) {
        allTokens.put(node, 0f);
        sortedRanges.add(new Range<>(lastToken, node));
        lastToken = node;
    }
    for (String ks : Schema.instance.getKeyspaces()) {
        for (TableMetadata cfmd : Schema.instance.getTablesAndViews(ks)) {
            for (Range<Token> r : sortedRanges) {
                // looping over every KS:CF:Range, get the split size and add it to the count
                allTokens.put(r.right, allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.name, r, 1).size());
            }
        }
    }
    // sum every count up and divide count/total for the fractional ownership
    float total = 0f;
    for (Float f : allTokens.values())
        total += f;
    for (Map.Entry<Token, Float> row : allTokens.entrySet())
        allTokens.put(row.getKey(), row.getValue() / total);
    return allTokens;
}
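Since each value is count/total, the returned fractions should sum to roughly 1.0 across all tokens. A minimal consumption sketch, assuming a partitioner instance and a sortedTokens list are already in scope:

// Hedged usage sketch: fractional ownership values sum to ~1.0.
Map<Token, Float> ownership = partitioner.describeOwnership(sortedTokens);
float sum = 0f;
for (Map.Entry<Token, Float> entry : ownership.entrySet()) {
    System.out.println(entry.getKey() + " owns " + entry.getValue());
    sum += entry.getValue();
}
assert Math.abs(sum - 1.0f) < 1e-3f : "ownership fractions should sum to ~1";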
use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.
the class AbstractBTreePartition method build.
protected static Holder build(UnfilteredRowIterator iterator, int initialRowCapacity, boolean ordered) {
    TableMetadata metadata = iterator.metadata();
    RegularAndStaticColumns columns = iterator.columns();
    boolean reversed = iterator.isReverseOrder();
    BTree.Builder<Row> builder = BTree.builder(metadata.comparator, initialRowCapacity);
    builder.auto(!ordered);
    MutableDeletionInfo.Builder deletionBuilder = MutableDeletionInfo.builder(iterator.partitionLevelDeletion(), metadata.comparator, reversed);
    while (iterator.hasNext()) {
        Unfiltered unfiltered = iterator.next();
        if (unfiltered.kind() == Unfiltered.Kind.ROW)
            builder.add((Row) unfiltered);
        else
            deletionBuilder.add((RangeTombstoneMarker) unfiltered);
    }
    if (reversed)
        builder.reverse();
    return new Holder(columns, builder.build(), deletionBuilder.build(), iterator.staticRow(), iterator.stats());
}
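The interesting switch here is builder.auto(!ordered): with auto mode on, the builder handles out-of-order additions itself, while turning it off (as build does when ordered is true) trusts the caller to add rows already in comparator order. A hedged sketch of that contract, where presortedRows is a hypothetical pre-sorted source:

// Hedged sketch: with auto(false) the builder assumes rows arrive
// already sorted by metadata.comparator, mirroring the 'ordered' case
// above. 'presortedRows' is a hypothetical source, not from the source.
BTree.Builder<Row> builder = BTree.builder(metadata.comparator, 16);
builder.auto(false);
for (Row row : presortedRows)
    builder.add(row);
Object[] tree = builder.build();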