Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class SerializationHeaderTest, method testWrittenAsDifferentKind.
@Test
public void testWrittenAsDifferentKind() throws Exception {
    final String tableName = "testWrittenAsDifferentKind";
    // final String schemaCqlWithStatic = String.format("CREATE TABLE %s (k int, c int, v int static, PRIMARY KEY(k, c))", tableName);
    // final String schemaCqlWithRegular = String.format("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY(k, c))", tableName);
    ColumnIdentifier v = ColumnIdentifier.getInterned("v", false);
    TableMetadata schemaWithStatic = TableMetadata.builder(KEYSPACE, tableName)
                                                  .addPartitionKeyColumn("k", Int32Type.instance)
                                                  .addClusteringColumn("c", Int32Type.instance)
                                                  .addStaticColumn("v", Int32Type.instance)
                                                  .build();
    TableMetadata schemaWithRegular = TableMetadata.builder(KEYSPACE, tableName)
                                                   .addPartitionKeyColumn("k", Int32Type.instance)
                                                   .addClusteringColumn("c", Int32Type.instance)
                                                   .addRegularColumn("v", Int32Type.instance)
                                                   .build();
    ColumnMetadata columnStatic = schemaWithStatic.getColumn(v);
    ColumnMetadata columnRegular = schemaWithRegular.getColumn(v);
    schemaWithStatic = schemaWithStatic.unbuild().recordColumnDrop(columnRegular, 0L).build();
    schemaWithRegular = schemaWithRegular.unbuild().recordColumnDrop(columnStatic, 0L).build();
    final AtomicInteger generation = new AtomicInteger();
    File dir = new File(Files.createTempDir());
    try {
        BiFunction<TableMetadata, Function<ByteBuffer, Clustering<?>>, Callable<Descriptor>> writer = (schema, clusteringFunction) -> () -> {
            Descriptor descriptor = new Descriptor(BigFormat.latestVersion, dir, schema.keyspace, schema.name, generation.incrementAndGet(), SSTableFormat.Type.BIG);
            SerializationHeader header = SerializationHeader.makeWithoutStats(schema);
            try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
                 SSTableWriter sstableWriter = BigTableWriter.create(TableMetadataRef.forOfflineTools(schema), descriptor, 1, 0L, null, false, 0, header, Collections.emptyList(), txn)) {
                ColumnMetadata cd = schema.getColumn(v);
                for (int i = 0; i < 5; ++i) {
                    final ByteBuffer value = Int32Type.instance.decompose(i);
                    Cell<?> cell = BufferCell.live(cd, 1L, value);
                    Clustering<?> clustering = clusteringFunction.apply(value);
                    Row row = BTreeRow.singleCellRow(clustering, cell);
                    sstableWriter.append(PartitionUpdate.singleRowUpdate(schema, value, row).unfilteredIterator());
                }
                sstableWriter.finish(false);
                txn.finish();
            }
            return descriptor;
        };
        Descriptor sstableWithRegular = writer.apply(schemaWithRegular, BufferClustering::new).call();
        Descriptor sstableWithStatic = writer.apply(schemaWithStatic, value -> Clustering.STATIC_CLUSTERING).call();
        SSTableReader readerWithStatic = SSTableReader.openNoValidation(sstableWithStatic, TableMetadataRef.forOfflineTools(schemaWithRegular));
        SSTableReader readerWithRegular = SSTableReader.openNoValidation(sstableWithRegular, TableMetadataRef.forOfflineTools(schemaWithStatic));
        try (ISSTableScanner partitions = readerWithStatic.getScanner()) {
            for (int i = 0; i < 5; ++i) {
                UnfilteredRowIterator partition = partitions.next();
                Assert.assertFalse(partition.hasNext());
                long value = Int32Type.instance.compose(partition.staticRow().getCell(columnStatic).buffer());
                Assert.assertEquals(value, (long) i);
            }
            Assert.assertFalse(partitions.hasNext());
        }
        try (ISSTableScanner partitions = readerWithRegular.getScanner()) {
            for (int i = 0; i < 5; ++i) {
                UnfilteredRowIterator partition = partitions.next();
                long value = Int32Type.instance.compose(((Row) partition.next()).getCell(columnRegular).buffer());
                Assert.assertEquals(value, (long) i);
                Assert.assertTrue(partition.staticRow().isEmpty());
                Assert.assertFalse(partition.hasNext());
            }
            Assert.assertFalse(partitions.hasNext());
        }
    } finally {
        FileUtils.deleteRecursive(dir);
    }
}
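For reference, the read-back pattern the assertions above rely on can be reduced to a small helper. This is a minimal sketch of ours, not code from the Cassandra test: the helper name dumpPartitions is hypothetical, imports are omitted as in the surrounding excerpts, and only calls that already appear above are used.

// Minimal sketch: open an sstable offline and walk every partition and row via ISSTableScanner.
// Assumes `desc` and `metadata` describe an existing sstable; `dumpPartitions` is a hypothetical helper.
static void dumpPartitions(Descriptor desc, TableMetadata metadata) {
    SSTableReader reader = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
    try (ISSTableScanner scanner = reader.getScanner()) {
        while (scanner.hasNext()) {
            try (UnfilteredRowIterator partition = scanner.next()) {
                System.out.println("partition " + partition.partitionKey());
                if (!partition.staticRow().isEmpty())
                    System.out.println("  static: " + partition.staticRow());
                while (partition.hasNext())
                    System.out.println("  " + partition.next());
            }
        }
    }
}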
Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class PendingAntiCompactionTest, method testUnblockedAcquisition.
@Test
public void testUnblockedAcquisition() throws ExecutionException, InterruptedException {
    cfs.disableAutoCompaction();
    ExecutorService es = Executors.newFixedThreadPool(1);
    makeSSTables(2);
    UUID prsid = prepareSession();
    Set<SSTableReader> sstables = cfs.getLiveSSTables();
    List<ISSTableScanner> scanners = sstables.stream().map(SSTableReader::getScanner).collect(Collectors.toList());
    try {
        try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
             CompactionController controller = new CompactionController(cfs, sstables, 0);
             CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, scanners, controller, 0, UUID.randomUUID())) {
            // `ci` is our imaginary ongoing anticompaction which makes no progress until after 5s
            // now we try to start a new AC, which will try to cancel all ongoing compactions
            CompactionManager.instance.active.beginCompaction(ci);
            PendingAntiCompaction pac = new PendingAntiCompaction(prsid, Collections.singleton(cfs), atEndpoint(FULL_RANGE, NO_RANGES), es, () -> false);
            ListenableFuture fut = pac.run();
            try {
                fut.get(5, TimeUnit.SECONDS);
            } catch (TimeoutException e) {
                // expected, we wait 1 minute for compactions to get cancelled in runWithCompactionsDisabled, but we are not iterating
                // CompactionIterator so the compaction is not actually cancelled
            }
            try {
                assertTrue(ci.hasNext());
                ci.next();
                fail("CompactionIterator should be abortable");
            } catch (CompactionInterruptedException e) {
                CompactionManager.instance.active.finishCompaction(ci);
                txn.abort();
                // expected
            }
            CountDownLatch cdl = new CountDownLatch(1);
            Futures.addCallback(fut, new FutureCallback<Object>() {
                public void onSuccess(@Nullable Object o) {
                    cdl.countDown();
                }

                public void onFailure(Throwable throwable) {
                }
            }, MoreExecutors.directExecutor());
            assertTrue(cdl.await(1, TimeUnit.MINUTES));
        }
    } finally {
        es.shutdown();
    }
}
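The test above opens one scanner per live sstable with a stream and hands the whole list to the CompactionIterator, which then owns and closes them. If anything failed between opening the scanners and constructing the iterator, the already-opened scanners would leak. The sketch below is our own defensive variant, not Cassandra API: the helper name openScanners is hypothetical, and it assumes ISSTableScanner.close() throws no checked exception, consistent with its use in try-with-resources elsewhere on this page.

// Sketch (hypothetical utility): open one scanner per sstable and close every
// already-opened scanner if opening a later one fails.
static List<ISSTableScanner> openScanners(Collection<SSTableReader> sstables) {
    List<ISSTableScanner> scanners = new ArrayList<>(sstables.size());
    try {
        for (SSTableReader sstable : sstables)
            scanners.add(sstable.getScanner());
        return scanners;
    } catch (RuntimeException | Error e) {
        // close whatever was opened so far before propagating the failure
        for (ISSTableScanner scanner : scanners)
            scanner.close();
        throw e;
    }
}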
Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class ThrottledUnfilteredIteratorTest, method complexThrottleWithTombstoneTest.
@Test
public void complexThrottleWithTombstoneTest() throws Throwable {
    // create cell tombstone, range tombstone, partition deletion
    createTable("CREATE TABLE %s (pk int, ck1 int, ck2 int, v1 int, v2 int, PRIMARY KEY (pk, ck1, ck2))");
    for (int ck1 = 1; ck1 <= 150; ck1++)
        for (int ck2 = 1; ck2 <= 150; ck2++) {
            int timestamp = ck1, v1 = ck1, v2 = ck2;
            execute("INSERT INTO %s(pk,ck1,ck2,v1,v2) VALUES(1,?,?,?,?) using timestamp " + timestamp, ck1, ck2, v1, v2);
        }
    for (int ck1 = 1; ck1 <= 100; ck1++)
        for (int ck2 = 1; ck2 <= 100; ck2++) {
            if (ck1 % 2 == 0 || ck1 % 3 == 0) // range tombstone
                execute("DELETE FROM %s USING TIMESTAMP 170 WHERE pk=1 AND ck1=?", ck1);
            else if (ck1 == ck2) // row tombstone
                execute("DELETE FROM %s USING TIMESTAMP 180 WHERE pk=1 AND ck1=? AND ck2=?", ck1, ck2);
            else if (ck1 == ck2 - 1) // cell tombstone
                execute("DELETE v2 FROM %s USING TIMESTAMP 190 WHERE pk=1 AND ck1=? AND ck2=?", ck1, ck2);
        }
    // range deletion
    execute("DELETE FROM %s USING TIMESTAMP 150 WHERE pk=1 AND ck1 > 100 AND ck1 < 120");
    execute("DELETE FROM %s USING TIMESTAMP 150 WHERE pk=1 AND ck1 = 50 AND ck2 < 120");
    // partition deletion
    execute("DELETE FROM %s USING TIMESTAMP 160 WHERE pk=1");
    // flush and generate 1 sstable
    ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
    cfs.forceBlockingFlush();
    cfs.disableAutoCompaction();
    cfs.forceMajorCompaction();
    assertEquals(1, cfs.getLiveSSTables().size());
    SSTableReader reader = cfs.getLiveSSTables().iterator().next();
    try (ISSTableScanner scanner = reader.getScanner()) {
        try (UnfilteredRowIterator rowIterator = scanner.next()) {
            // only 1 partition data
            assertFalse(scanner.hasNext());
            List<Unfiltered> expectedUnfiltereds = new ArrayList<>();
            rowIterator.forEachRemaining(expectedUnfiltereds::add);
            // test different throttle
            for (Integer throttle : Arrays.asList(2, 3, 4, 5, 11, 41, 99, 1000, 10001)) {
                try (ISSTableScanner scannerForThrottle = reader.getScanner()) {
                    assertTrue(scannerForThrottle.hasNext());
                    try (UnfilteredRowIterator rowIteratorForThrottle = scannerForThrottle.next()) {
                        assertFalse(scannerForThrottle.hasNext());
                        verifyThrottleIterator(expectedUnfiltereds, rowIteratorForThrottle, new ThrottledUnfilteredIterator(rowIteratorForThrottle, throttle), throttle);
                    }
                }
            }
        }
    }
}
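The test wraps the partition read from the ISSTableScanner in a ThrottledUnfilteredIterator and lets verifyThrottleIterator compare it against the unthrottled contents. As a rough illustration of how a caller might consume such a wrapper, here is a sketch of ours, written under the assumption that ThrottledUnfilteredIterator is a CloseableIterator over UnfilteredRowIterator chunks of at most `throttle` unfiltereds each; the helper name countChunks is hypothetical.

// Sketch, assuming ThrottledUnfilteredIterator yields UnfilteredRowIterator chunks of at
// most `throttle` unfiltereds each; the chunk counting is ours, not part of the test above.
static int countChunks(UnfilteredRowIterator partition, int throttle) {
    int chunks = 0;
    try (CloseableIterator<UnfilteredRowIterator> throttled = new ThrottledUnfilteredIterator(partition, throttle)) {
        while (throttled.hasNext()) {
            try (UnfilteredRowIterator chunk = throttled.next()) {
                chunk.forEachRemaining(unfiltered -> { /* consume at most `throttle` unfiltereds */ });
            }
            chunks++;
        }
    }
    return chunks;
}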
Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class SSTableExport, method main.
/**
 * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
 *
 * @param args command line arguments
 * @throws ConfigurationException on configuration failure (wrong params given)
 */
@SuppressWarnings("resource")
public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    HashSet<String> excludes = new HashSet<>(Arrays.asList(cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null
                                                           ? new String[0]
                                                           : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
    if (cmd.getArgs().length != 1) {
        String msg = "You must supply exactly one sstable";
        if (cmd.getArgs().length == 0 && (keys != null && keys.length > 0 || !excludes.isEmpty()))
            msg += ", which should be before the -k/-x options so it's not interpreted as a partition key.";
        System.err.println(msg);
        printUsage();
        System.exit(1);
    }
    String ssTableFileName = new File(cmd.getArgs()[0]).absolutePath();
    if (!new File(ssTableFileName).exists()) {
        System.err.println("Cannot find file " + ssTableFileName);
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
        TableMetadata metadata = Util.metadataFromSSTable(desc);
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            try (KeyIterator iter = new KeyIterator(desc, metadata)) {
                JsonTransformer.keysToJson(null, Util.iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys)
                                                                       .filter(key -> !excludes.contains(key))
                                                                       .map(metadata.partitionKeyType::fromString)
                                                                       .map(partitioner::decorateKey)
                                                                       .sorted()
                                                                       .map(DecoratedKey::getToken)
                                                                       .map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound()))
                                                                       .collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            Stream<UnfilteredRowIterator> partitions = Util.iterToStream(currentScanner)
                                                           .filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else if (cmd.hasOption(PARTITION_JSON_LINES)) {
                JsonTransformer.toJsonLines(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
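The key-restricted branch above packs the whole key-to-bounds conversion into one stream expression. Unrolled, the same steps look like the sketch below; scannerForKeys is a hypothetical helper name of ours, imports are omitted as in the other excerpts, and only calls already present above are used.

// Step-by-step version of the keys-to-bounds pipeline used in SSTableExport.main (helper name is ours).
static ISSTableScanner scannerForKeys(SSTableReader sstable, TableMetadata metadata,
                                      IPartitioner partitioner, Collection<String> keys) {
    // 1. parse each text key with the table's partition key type and decorate it with its token
    List<DecoratedKey> decorated = new ArrayList<>();
    for (String key : keys)
        decorated.add(partitioner.decorateKey(metadata.partitionKeyType.fromString(key)));
    // 2. sort in ring order, as the stream version does with .sorted()
    Collections.sort(decorated);
    // 3. turn each key into a single-key bounds pair [minKeyBound, maxKeyBound] around its token
    List<AbstractBounds<PartitionPosition>> bounds = new ArrayList<>();
    for (DecoratedKey key : decorated)
        bounds.add(new Bounds<>(key.getToken().minKeyBound(), key.getToken().maxKeyBound()));
    // 4. ask the reader for a scanner restricted to exactly those bounds
    return sstable.getScanner(bounds.iterator());
}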