Use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
The class KeyIterator, method computeNext:
protected DecoratedKey computeNext() {
    fileAccessLock.readLock().lock();
    try {
        if (in.isEOF())
            return endOfData();
        keyPosition = in.getFilePointer();
        DecoratedKey key = partitioner.decorateKey(ByteBufferUtil.readWithShortLength(in.get()));
        // skip remainder of the entry
        RowIndexEntry.Serializer.skip(in.get(), desc.version);
        return key;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        fileAccessLock.readLock().unlock();
    }
}
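For orientation, a minimal usage sketch of KeyIterator follows. It assumes a Descriptor and TableMetadata already in scope (named desc and metadata, mirroring the SSTableExport snippet further down); this is a sketch, not code from the project:

try (KeyIterator iter = new KeyIterator(desc, metadata)) {
    while (iter.hasNext()) {
        DecoratedKey key = iter.next();
        // decode the raw key bytes with the table's partition key type
        System.out.println(metadata.partitionKeyType.getString(key.getKey()));
    }
}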
Use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
The class SSTableRewriter, method append:
public RowIndexEntry append(UnfilteredRowIterator partition) {
    // we do this before appending to ensure we can resetAndTruncate() safely if the append fails
    DecoratedKey key = partition.partitionKey();
    maybeReopenEarly(key);
    RowIndexEntry index = writer.append(partition);
    if (DatabaseDescriptor.shouldMigrateKeycacheOnCompaction()) {
        if (!transaction.isOffline() && index != null) {
            // if the key was cached for any of the sstables being rewritten,
            // remember its new index entry so the key cache can be migrated
            for (SSTableReader reader : transaction.originals()) {
                if (reader.getCachedPosition(key, false) != null) {
                    cachedKeys.put(key, index);
                    break;
                }
            }
        }
    }
    return index;
}
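The migration branch above implements a simple rule: if any of the sstables being rewritten had this key in the key cache, remember the key's new index entry so the cache can be primed for the replacement sstable. A minimal, self-contained model of that decision, using plain maps in place of SSTableReader and RowIndexEntry (all names here are illustrative, not project API):

import java.util.List;
import java.util.Map;

final class KeyCacheMigrationModel {
    // originals: per-source-sstable "cache" of key -> cached position
    // cachedKeys: entries to prime into the cache for the new sstable
    static <K, V> void rememberIfPreviouslyCached(K key, V newIndexEntry,
                                                  List<Map<K, Long>> originals,
                                                  Map<K, V> cachedKeys) {
        for (Map<K, Long> readerCache : originals) {
            if (readerCache.get(key) != null) { // key was cached for a source sstable
                cachedKeys.put(key, newIndexEntry);
                break; // one hit is enough
            }
        }
    }
}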
Use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
The class SSTableExport, method main:
/**
 * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
 *
 * @param args command line arguments
 * @throws ConfigurationException on configuration failure (wrong params given)
 */
@SuppressWarnings("resource")
public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    HashSet<String> excludes = new HashSet<>(Arrays.asList(
        cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null
            ? new String[0]
            : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
    if (cmd.getArgs().length != 1) {
        String msg = "You must supply exactly one sstable";
        if (cmd.getArgs().length == 0 && (keys != null && keys.length > 0 || !excludes.isEmpty()))
            msg += ", which should be before the -k/-x options so it's not interpreted as a partition key.";
        System.err.println(msg);
        printUsage();
        System.exit(1);
    }
    String ssTableFileName = new File(cmd.getArgs()[0]).absolutePath();
    if (!new File(ssTableFileName).exists()) {
        System.err.println("Cannot find file " + ssTableFileName);
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
        TableMetadata metadata = Util.metadataFromSSTable(desc);
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            // only enumerate the partition keys, via the index
            try (KeyIterator iter = new KeyIterator(desc, metadata)) {
                JsonTransformer.keysToJson(null, Util.iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                // restrict the scan to the requested keys, each mapped to the bounds of its token
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys)
                    .filter(key -> !excludes.contains(key))
                    .map(metadata.partitionKeyType::fromString)
                    .map(partitioner::decorateKey)
                    .sorted()
                    .map(DecoratedKey::getToken)
                    .map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound()))
                    .collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            Stream<UnfilteredRowIterator> partitions = Util.iterToStream(currentScanner)
                .filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else if (cmd.hasOption(PARTITION_JSON_LINES)) {
                JsonTransformer.toJsonLines(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
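The stream pipeline over keys is where DecoratedKey earns its keep in this tool: each key string is parsed with the table's key type, decorated by the partitioner, and its token expanded into a [minKeyBound, maxKeyBound] range for the scanner. A standalone sketch of that mapping for a single key, assuming Murmur3Partitioner and a UTF8-typed partition key (both chosen here purely for illustration):

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.PartitionPosition;
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Token;

public class KeyBoundsSketch {
    public static void main(String[] args) {
        IPartitioner partitioner = Murmur3Partitioner.instance;
        // parse the key string and decorate it with its token
        DecoratedKey dk = partitioner.decorateKey(UTF8Type.instance.fromString("k1"));
        Token token = dk.getToken();
        // expand the token to bounds covering exactly this partition
        AbstractBounds<PartitionPosition> bounds = new Bounds<>(token.minKeyBound(), token.maxKeyBound());
        System.out.println(dk + " -> " + bounds);
    }
}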
Use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
The class CompactionControllerTest, method testMaxPurgeableTimestamp:
@Test
public void testMaxPurgeableTimestamp() {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF1);
    cfs.truncateBlocking();
    DecoratedKey key = Util.dk("k1");
    // latest timestamp
    long timestamp1 = FBUtilities.timestampMicros();
    long timestamp2 = timestamp1 - 5;
    // oldest timestamp
    long timestamp3 = timestamp2 - 5;
    // add to first memtable
    applyMutation(cfs.metadata(), key, timestamp1);
    // check max purgeable timestamp without any sstables
    try (CompactionController controller = new CompactionController(cfs, null, 0)) {
        // memtable only
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp1);
        cfs.forceBlockingFlush();
        // no memtables and no sstables
        assertTrue(controller.getPurgeEvaluator(key).test(Long.MAX_VALUE));
    }
    // first sstable is compacting
    Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());
    // create another sstable
    applyMutation(cfs.metadata(), key, timestamp2);
    cfs.forceBlockingFlush();
    // check max purgeable timestamp when compacting the first sstable, with and without a memtable
    try (CompactionController controller = new CompactionController(cfs, compacting, 0)) {
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp2);
        applyMutation(cfs.metadata(), key, timestamp3);
        // second sstable and second memtable
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3);
    }
    // check max purgeable timestamp again without any sstables, but with different insertion orders in the memtable
    cfs.forceBlockingFlush();
    // newest to oldest
    try (CompactionController controller = new CompactionController(cfs, null, 0)) {
        applyMutation(cfs.metadata(), key, timestamp1);
        applyMutation(cfs.metadata(), key, timestamp2);
        applyMutation(cfs.metadata(), key, timestamp3);
        // memtable only
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3);
    }
    cfs.forceBlockingFlush();
    // oldest to newest
    try (CompactionController controller = new CompactionController(cfs, null, 0)) {
        applyMutation(cfs.metadata(), key, timestamp3);
        applyMutation(cfs.metadata(), key, timestamp2);
        applyMutation(cfs.metadata(), key, timestamp1);
        assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3);
    }
}
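assertPurgeBoundary is a test helper not shown in this excerpt. Since getPurgeEvaluator(key) returns a predicate over timestamps, a plausible reconstruction (an assumption, for illustration; it presumes JUnit's assertTrue/assertFalse and java.util.function.LongPredicate) is:

private void assertPurgeBoundary(LongPredicate evaluator, long boundary) {
    // timestamps strictly older than the boundary are purgeable...
    assertTrue(evaluator.test(boundary - 1));
    // ...the boundary itself (and anything newer) is not
    assertFalse(evaluator.test(boundary));
}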
Use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
The class CompactionControllerTest, method testGetFullyExpiredSSTables:
@Test
public void testGetFullyExpiredSSTables() {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF2);
    cfs.truncateBlocking();
    DecoratedKey key = Util.dk("k1");
    // latest timestamp
    long timestamp1 = FBUtilities.timestampMicros();
    long timestamp2 = timestamp1 - 5;
    // oldest timestamp
    long timestamp3 = timestamp2 - 5;
    // create an sstable with a tombstone that should be expired, since no older timestamps exist yet
    applyDeleteMutation(cfs.metadata(), key, timestamp2);
    cfs.forceBlockingFlush();
    // the first sstable, holding the tombstone, is compacting
    Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());
    // create another sstable with a more recent timestamp
    applyMutation(cfs.metadata(), key, timestamp1);
    cfs.forceBlockingFlush();
    // the second sstable is overlapping
    Set<SSTableReader> overlapping = Sets.difference(Sets.newHashSet(cfs.getLiveSSTables()), compacting);
    // the first sstable should be expired because the overlapping sstable is newer and the gc period is later
    int gcBefore = (int) (System.currentTimeMillis() / 1000) + 5;
    Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
    assertNotNull(expired);
    assertEquals(1, expired.size());
    assertEquals(compacting.iterator().next(), expired.iterator().next());
    // however, if we add an older mutation to the memtable, the sstable should no longer be expired
    applyMutation(cfs.metadata(), key, timestamp3);
    expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
    assertNotNull(expired);
    assertEquals(0, expired.size());
    // now, if we explicitly ask to ignore overlapped sstables, we should get back our expired sstable
    expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore, true);
    assertNotNull(expired);
    assertEquals(1, expired.size());
}
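Both tests lean on applyMutation and applyDeleteMutation helpers that are not shown. One plausible shape for them, assuming a clustering column "ck" and a value column "val" (the column names and the RowUpdateBuilder usage here are illustrative assumptions, not quoted from the test):

private void applyMutation(TableMetadata metadata, DecoratedKey key, long timestamp) {
    // write a single cell at the given timestamp, so the resulting
    // memtable/sstable carries that timestamp
    ByteBuffer value = ByteBufferUtil.bytes(1L);
    new RowUpdateBuilder(metadata, timestamp, key.getKey())
        .clustering("ck")
        .add("val", value)
        .build()
        .applyUnsafe();
}

private void applyDeleteMutation(TableMetadata metadata, DecoratedKey key, long timestamp) {
    // delete the whole partition at the given timestamp
    new Mutation(PartitionUpdate.fullPartitionDelete(metadata, key, timestamp, FBUtilities.nowInSeconds()))
        .applyUnsafe();
}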