Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From the class LongLeveledCompactionStrategyTest, method testLeveledScanner.
@Test
public void testLeveledScanner() throws Exception {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARDLVL2);
    // 100 KB value, to make it easy to end up with multiple files
    ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
    // Enough data to have a level 1 and 2
    int rows = 128;
    int columns = 10;
    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        UpdateBuilder builder = UpdateBuilder.create(store.metadata(), key);
        for (int c = 0; c < columns; c++)
            builder.newRow("column" + c).add("val", value);
        Mutation rm = new Mutation(builder.build());
        rm.apply();
        // Flush after every partition so each one lands in its own sstable
        store.forceBlockingFlush();
    }
    LeveledCompactionStrategyTest.waitForLeveling(store);
    store.disableAutoCompaction();
    CompactionStrategyManager mgr = store.getCompactionStrategyManager();
    LeveledCompactionStrategy lcs = (LeveledCompactionStrategy) mgr.getStrategies().get(1).get(0);
    // 10 KB value
    value = ByteBuffer.wrap(new byte[10 * 1024]);
    // Add 10 partitions
    for (int r = 0; r < 10; r++) {
        DecoratedKey key = Util.dk(String.valueOf(r));
        UpdateBuilder builder = UpdateBuilder.create(store.metadata(), key);
        for (int c = 0; c < 10; c++)
            builder.newRow("column" + c).add("val", value);
        Mutation rm = new Mutation(builder.build());
        rm.apply();
    }
    // Flush the new partitions into a single L0 sstable
    store.forceBlockingFlush();
    store.runWithCompactionsDisabled(new Callable<Void>() {
        public Void call() throws Exception {
            Iterable<SSTableReader> allSSTables = store.getSSTables(SSTableSet.LIVE);
            for (SSTableReader sstable : allSSTables) {
                if (sstable.getSSTableLevel() == 0) {
                    System.out.println("Mutating L0-SSTABLE level to L1 to simulate a bug: " + sstable.getFilename());
                    sstable.descriptor.getMetadataSerializer().mutateLevel(sstable.descriptor, 1);
                    sstable.reloadSSTableMetadata();
                }
            }
            try (AbstractCompactionStrategy.ScannerList scannerList = lcs.getScanners(Lists.newArrayList(allSSTables))) {
                // Verify that leveled scanners will always iterate in ascending key order (CASSANDRA-9935)
                for (ISSTableScanner scanner : scannerList.scanners) {
                    DecoratedKey lastKey = null;
                    while (scanner.hasNext()) {
                        // Each item the scanner yields is a whole partition
                        UnfilteredRowIterator row = scanner.next();
                        if (lastKey != null) {
                            assertTrue("row " + row.partitionKey() + " received out of order wrt " + lastKey,
                                       row.partitionKey().compareTo(lastKey) >= 0);
                        }
                        lastKey = row.partitionKey();
                    }
                }
            }
            return null;
        }
    }, true, true);
}
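The ordering check at the heart of this test can be factored into a small helper. The sketch below is not part of the test itself; the name verifyAscendingOrder is hypothetical, and it additionally closes each UnfilteredRowIterator in try-with-resources (the interface is AutoCloseable), which the test above does not bother with.

// Hypothetical helper: assert that a scanner yields partitions in ascending
// partition-key order, the invariant CASSANDRA-9935 requires of leveled scanners.
private static void verifyAscendingOrder(ISSTableScanner scanner) {
    DecoratedKey lastKey = null;
    while (scanner.hasNext()) {
        try (UnfilteredRowIterator partition = scanner.next()) {
            if (lastKey != null)
                assertTrue("partition " + partition.partitionKey() + " received out of order wrt " + lastKey,
                           partition.partitionKey().compareTo(lastKey) >= 0);
            lastKey = partition.partitionKey();
        }
    }
}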
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From the class SSTableExport, method main.
/**
 * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
 *
 * @param args command line arguments
 * @throws ConfigurationException on configuration failure (wrong params given)
 */
public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    if (cmd.getArgs().length != 1) {
        System.err.println("You must supply exactly one sstable");
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    HashSet<String> excludes = new HashSet<>(Arrays.asList(cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null
            ? new String[0]
            : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
    String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();
    if (!new File(ssTableFileName).exists()) {
        System.err.println("Cannot find file " + ssTableFileName);
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
        TableMetadata metadata = metadataFromSSTable(desc);
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            try (KeyIterator iter = new KeyIterator(desc, metadata)) {
                JsonTransformer.keysToJson(null, iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                // Build one token bound per requested (and not excluded) key,
                // sorted so the scanner visits them in token order
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys)
                        .filter(key -> !excludes.contains(key))
                        .map(metadata.partitionKeyType::fromString)
                        .map(partitioner::decorateKey)
                        .sorted()
                        .map(DecoratedKey::getToken)
                        .map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound()))
                        .collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            Stream<UnfilteredRowIterator> partitions = iterToStream(currentScanner)
                    .filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        // Throwing the exception out of main with a broken pipe causes the Windows cmd shell to hang
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
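The iterToStream helper used above is defined elsewhere in SSTableExport. A plausible shape for it, assuming the standard java.util.Spliterators/StreamSupport plumbing (the actual implementation may differ), is:

// Wrap a plain Iterator in a sequential Stream so partitions can be
// filtered and consumed with the stream API.
private static <T> Stream<T> iterToStream(Iterator<T> iter) {
    Spliterator<T> splititer = Spliterators.spliteratorUnknownSize(iter, Spliterator.IMMUTABLE);
    return StreamSupport.stream(splititer, false);
}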
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From the class CompactionManager, method doCleanupOne.
/**
 * Goes over an sstable and removes the keys this node is no longer responsible for,
 * keeping only the keys that fall within the given ranges.
 *
 * @throws IOException
 */
private void doCleanupOne(final ColumnFamilyStore cfs, LifecycleTransaction txn, CleanupStrategy cleanupStrategy, Collection<Range<Token>> ranges, boolean hasIndexes) throws IOException {
    assert !cfs.isIndex();
    SSTableReader sstable = txn.onlyOne();
    // If the sstable shares nothing with the owned ranges (and there are no indexes
    // to keep consistent), drop it outright without rewriting anything
    if (!hasIndexes && !new Bounds<>(sstable.first.getToken(), sstable.last.getToken()).intersects(ranges)) {
        txn.obsoleteOriginals();
        txn.finish();
        return;
    }
    if (!needsCleanup(sstable, ranges)) {
        logger.trace("Skipping {} for cleanup; all rows should be kept", sstable);
        return;
    }
    long start = System.nanoTime();
    long totalkeysWritten = 0;
    long expectedBloomFilterSize = Math.max(cfs.metadata().params.minIndexInterval, SSTableReader.getApproximateKeyCount(txn.originals()));
    if (logger.isTraceEnabled())
        logger.trace("Expected bloom filter size : {}", expectedBloomFilterSize);
    logger.info("Cleaning up {}", sstable);
    File compactionFileLocation = sstable.descriptor.directory;
    RateLimiter limiter = getRateLimiter();
    double compressionRatio = sstable.getCompressionRatio();
    if (compressionRatio == MetadataCollector.NO_COMPRESSION_RATIO)
        compressionRatio = 1.0;
    List<SSTableReader> finished;
    int nowInSec = FBUtilities.nowInSeconds();
    try (SSTableRewriter writer = SSTableRewriter.construct(cfs, txn, false, sstable.maxDataAge);
         ISSTableScanner scanner = cleanupStrategy.getScanner(sstable);
         CompactionController controller = new CompactionController(cfs, txn.originals(), getDefaultGcBefore(cfs, nowInSec));
         CompactionIterator ci = new CompactionIterator(OperationType.CLEANUP, Collections.singletonList(scanner), controller, nowInSec, UUIDGen.getTimeUUID(), metrics)) {
        StatsMetadata metadata = sstable.getSSTableMetadata();
        writer.switchWriter(createWriter(cfs, compactionFileLocation, expectedBloomFilterSize, metadata.repairedAt, metadata.pendingRepair, sstable, txn));
        long lastBytesScanned = 0;
        while (ci.hasNext()) {
            if (ci.isStopRequested())
                throw new CompactionInterruptedException(ci.getCompactionInfo());
            try (UnfilteredRowIterator partition = ci.next();
                 UnfilteredRowIterator notCleaned = cleanupStrategy.cleanup(partition)) {
                if (notCleaned == null)
                    continue;
                if (writer.append(notCleaned) != null)
                    totalkeysWritten++;
                long bytesScanned = scanner.getBytesScanned();
                compactionRateLimiterAcquire(limiter, bytesScanned, lastBytesScanned, compressionRatio);
                lastBytesScanned = bytesScanned;
            }
        }
        // Flush to ensure we don't lose the tombstones on a restart, since they are not commitlog'd
        cfs.indexManager.flushAllIndexesBlocking();
        finished = writer.finish();
    }
    if (!finished.isEmpty()) {
        String format = "Cleaned up to %s. %s to %s (~%d%% of original) for %,d keys. Time: %,dms.";
        long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        long startsize = sstable.onDiskLength();
        long endsize = 0;
        for (SSTableReader newSstable : finished)
            endsize += newSstable.onDiskLength();
        double ratio = (double) endsize / (double) startsize;
        logger.info(String.format(format, finished.get(0).getFilename(), FBUtilities.prettyPrintMemory(startsize), FBUtilities.prettyPrintMemory(endsize), (int) (ratio * 100), totalkeysWritten, dTime));
    }
}
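The throttling call inside the loop converts the scanner's uncompressed byte count back into an on-disk estimate before acquiring from the limiter. A sketch of what compactionRateLimiterAcquire plausibly does, assuming Guava's RateLimiter (the real method in CompactionManager may differ in detail):

// Estimate on-disk bytes read since the last acquire and block on the limiter.
// RateLimiter.acquire takes an int, so very large deltas are split into chunks.
static void compactionRateLimiterAcquire(RateLimiter limiter, long bytesScanned, long lastBytesScanned, double compressionRatio) {
    long lengthRead = (long) ((bytesScanned - lastBytesScanned) * compressionRatio) + 1;
    while (lengthRead >= Integer.MAX_VALUE) {
        limiter.acquire(Integer.MAX_VALUE);
        lengthRead -= Integer.MAX_VALUE;
    }
    if (lengthRead > 0) {
        limiter.acquire((int) lengthRead);
    }
}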
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From the class ReadCommandTest, method testSinglePartitionGroupMerge.
@Test
public void testSinglePartitionGroupMerge() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF3);
    // "1" indicates to create the data, "-1" to delete the row
    String[][][] groups = new String[][][] {
        new String[][] {
            new String[] { "1", "key1", "aa", "a" },
            new String[] { "1", "key2", "bb", "b" },
            new String[] { "1", "key3", "cc", "c" }
        },
        new String[][] {
            new String[] { "1", "key3", "dd", "d" },
            new String[] { "1", "key2", "ee", "e" },
            new String[] { "1", "key1", "ff", "f" }
        },
        new String[][] {
            new String[] { "1", "key6", "aa", "a" },
            new String[] { "1", "key5", "bb", "b" },
            new String[] { "1", "key4", "cc", "c" }
        },
        new String[][] {
            new String[] { "-1", "key6", "aa", "a" },
            new String[] { "-1", "key2", "bb", "b" }
        }
    };
    // Given the data above, when the keys are sorted and the deletions removed, we should
    // get these clustering rows in this order
    String[] expectedRows = new String[] { "aa", "ff", "ee", "cc", "dd", "cc", "bb" };
    List<ByteBuffer> buffers = new ArrayList<>(groups.length);
    int nowInSeconds = FBUtilities.nowInSeconds();
    ColumnFilter columnFilter = ColumnFilter.allRegularColumnsBuilder(cfs.metadata()).build();
    RowFilter rowFilter = RowFilter.create();
    Slice slice = Slice.make(ClusteringBound.BOTTOM, ClusteringBound.TOP);
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.with(cfs.metadata().comparator, slice), false);
    for (String[][] group : groups) {
        cfs.truncateBlocking();
        List<SinglePartitionReadCommand> commands = new ArrayList<>(group.length);
        for (String[] data : group) {
            if (data[0].equals("1")) {
                new RowUpdateBuilder(cfs.metadata(), 0, ByteBufferUtil.bytes(data[1]))
                        .clustering(data[2])
                        .add(data[3], ByteBufferUtil.bytes("blah"))
                        .build()
                        .apply();
            } else {
                RowUpdateBuilder.deleteRow(cfs.metadata(), FBUtilities.timestampMicros(), ByteBufferUtil.bytes(data[1]), data[2]).apply();
            }
            commands.add(SinglePartitionReadCommand.create(cfs.metadata(), nowInSeconds, columnFilter, rowFilter, DataLimits.NONE, Util.dk(data[1]), sliceFilter));
        }
        cfs.forceBlockingFlush();
        ReadQuery query = new SinglePartitionReadCommand.Group(commands, DataLimits.NONE);
        try (ReadExecutionController executionController = query.executionController();
             UnfilteredPartitionIterator iter = query.executeLocally(executionController);
             DataOutputBuffer buffer = new DataOutputBuffer()) {
            UnfilteredPartitionIterators.serializerForIntraNode().serialize(iter, columnFilter, buffer, MessagingService.current_version);
            buffers.add(buffer.buffer());
        }
    }
    // Deserialize, merge and check the results are all there
    List<UnfilteredPartitionIterator> iterators = new ArrayList<>();
    for (ByteBuffer buffer : buffers) {
        try (DataInputBuffer in = new DataInputBuffer(buffer, true)) {
            iterators.add(UnfilteredPartitionIterators.serializerForIntraNode().deserialize(in, MessagingService.current_version, cfs.metadata(), columnFilter, SerializationHelper.Flag.LOCAL));
        }
    }
    try (PartitionIterator partitionIterator = UnfilteredPartitionIterators.mergeAndFilter(iterators, nowInSeconds, new UnfilteredPartitionIterators.MergeListener() {
        public UnfilteredRowIterators.MergeListener getRowMergeListener(DecoratedKey partitionKey, List<UnfilteredRowIterator> versions) {
            return null;
        }

        public void close() {
        }
    })) {
        int i = 0;
        int numPartitions = 0;
        while (partitionIterator.hasNext()) {
            numPartitions++;
            try (RowIterator rowIterator = partitionIterator.next()) {
                while (rowIterator.hasNext()) {
                    Row row = rowIterator.next();
                    assertEquals("col=" + expectedRows[i++], row.clustering().toString(cfs.metadata()));
                    //System.out.print(row.toString(cfs.metadata, true));
                }
            }
        }
        assertEquals(5, numPartitions);
        assertEquals(expectedRows.length, i);
    }
}
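The verification loop above follows the standard consumption pattern for a filtered PartitionIterator: each call to next() yields a RowIterator that should be closed before advancing to the next partition. A minimal, stand-alone version of that pattern (the countRows name is hypothetical):

// Count rows across all partitions, closing each RowIterator as we go.
static int countRows(PartitionIterator partitions) {
    int rows = 0;
    while (partitions.hasNext()) {
        try (RowIterator partition = partitions.next()) {
            while (partition.hasNext()) {
                partition.next();
                rows++;
            }
        }
    }
    return rows;
}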
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From the class RecoveryManagerMissingHeaderTest, method testMissingHeader.
@Test
public void testMissingHeader() throws IOException {
    Keyspace keyspace1 = Keyspace.open(KEYSPACE1);
    Keyspace keyspace2 = Keyspace.open(KEYSPACE2);
    DecoratedKey dk = Util.dk("keymulti");
    UnfilteredRowIterator upd1 = Util.apply(new RowUpdateBuilder(keyspace1.getColumnFamilyStore(CF_STANDARD1).metadata(), 1L, 0, "keymulti")
            .clustering("col1")
            .add("val", "1")
            .build());
    UnfilteredRowIterator upd2 = Util.apply(new RowUpdateBuilder(keyspace2.getColumnFamilyStore(CF_STANDARD3).metadata(), 1L, 0, "keymulti")
            .clustering("col1")
            .add("val", "1")
            .build());
    // Drop the memtable contents so the data only survives in the commit log
    keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
    keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();
    // Nuke the header
    for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles()) {
        if (file.getName().endsWith(".header"))
            FileUtils.deleteWithConfirm(file);
    }
    // Replay the commit log and verify both updates survive
    CommitLog.instance.resetUnsafe(false);
    Assert.assertTrue(Util.sameContent(upd1, Util.getOnlyPartitionUnfiltered(Util.cmd(keyspace1.getColumnFamilyStore(CF_STANDARD1), dk).build()).unfilteredIterator()));
    Assert.assertTrue(Util.sameContent(upd2, Util.getOnlyPartitionUnfiltered(Util.cmd(keyspace2.getColumnFamilyStore(CF_STANDARD3), dk).build()).unfilteredIterator()));
}
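Util.sameContent is a test helper. One plausible implementation, assuming both iterators share metadata and yield their contents in clustering order (a sketch, not Cassandra's actual helper):

// Compare two partitions element by element: same partition key, same
// partition-level deletion and static row, then the same sequence of
// rows and range tombstone markers.
static boolean sameContent(UnfilteredRowIterator a, UnfilteredRowIterator b) {
    if (!a.partitionKey().equals(b.partitionKey())
            || !a.partitionLevelDeletion().equals(b.partitionLevelDeletion())
            || !a.staticRow().equals(b.staticRow()))
        return false;
    while (a.hasNext() && b.hasNext()) {
        if (!a.next().equals(b.next()))
            return false;
    }
    // Both must be exhausted for the contents to match
    return !a.hasNext() && !b.hasNext();
}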