Use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
From the class UFAuthTest, the method functionResource:
private FunctionResource functionResource(String functionName)
{
    // Note that this is somewhat brittle as it assumes that function names are
    // truly unique. As such, it will break in the face of overloading.
    // It is here to avoid having to duplicate the functionality of CqlParser
    // for transforming CQL types into AbstractTypes.
    FunctionName fn = parseFunctionName(functionName);
    Collection<Function> functions = Schema.instance.getFunctions(fn);
    assertEquals(String.format("Expected a single function definition for %s, but found %s",
                               functionName, functions.size()),
                 1, functions.size());
    return FunctionResource.function(fn.keyspace, fn.name, functions.iterator().next().argTypes());
}
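As the comment warns, this helper presumes the name maps to exactly one definition. A hedged sketch of how an overload would trip the assertion, reusing the createFunction/execute test helpers seen in the examples below (the CQL bodies here are illustrative, not from the original test):

// Hypothetical: once a second overload of the same name exists,
// Schema.instance.getFunctions(fn) returns two definitions and the
// assertEquals(1, functions.size()) above fails with an AssertionError.
String f = createFunction(KEYSPACE, "int",
                          "CREATE FUNCTION %s (i int) " +
                          "CALLED ON NULL INPUT RETURNS int " +
                          "LANGUAGE java AS 'return i;'");
execute("CREATE FUNCTION " + f + " (t text) " +
        "CALLED ON NULL INPUT RETURNS int " +
        "LANGUAGE java AS 'return 0;'");
functionResource(f); // expected a single function definition, but found 2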
Use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
From the class UFTest, the method testFunctionDropPreparedStatement:
@Test
public void testFunctionDropPreparedStatement() throws Throwable
{
    createTable("CREATE TABLE %s (key int PRIMARY KEY, d double)");
    String fSin = createFunction(KEYSPACE_PER_TEST, "double",
                                 "CREATE FUNCTION %s ( input double ) " +
                                 "CALLED ON NULL INPUT " +
                                 "RETURNS double " +
                                 "LANGUAGE java " +
                                 "AS 'return Double.valueOf(Math.sin(input.doubleValue()));'");
    FunctionName fSinName = parseFunctionName(fSin);
    Assert.assertEquals(1, Schema.instance.getFunctions(fSinName).size());

    // Create pairs of SELECT and INSERT statements. One statement in each pair uses the
    // function, so when we drop it those statements should be removed from the cache in
    // QueryProcessor. The other statements should be unaffected.
    ResultMessage.Prepared preparedSelect1 = QueryProcessor.instance.prepare(
        String.format("SELECT key, %s(d) FROM %s.%s", fSin, KEYSPACE, currentTable()),
        ClientState.forInternalCalls());
    ResultMessage.Prepared preparedSelect2 = QueryProcessor.instance.prepare(
        String.format("SELECT key FROM %s.%s", KEYSPACE, currentTable()),
        ClientState.forInternalCalls());
    ResultMessage.Prepared preparedInsert1 = QueryProcessor.instance.prepare(
        String.format("INSERT INTO %s.%s (key, d) VALUES (?, %s(?))", KEYSPACE, currentTable(), fSin),
        ClientState.forInternalCalls());
    ResultMessage.Prepared preparedInsert2 = QueryProcessor.instance.prepare(
        String.format("INSERT INTO %s.%s (key, d) VALUES (?, ?)", KEYSPACE, currentTable()),
        ClientState.forInternalCalls());

    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect2.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert2.statementId));

    execute("DROP FUNCTION " + fSin + "(double);");

    // The statements which use the dropped function should be removed from the cache,
    // while the others remain.
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect2.statementId));
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert2.statementId));

    execute("CREATE FUNCTION " + fSin + " ( input double ) " +
            "RETURNS NULL ON NULL INPUT " +
            "RETURNS double " +
            "LANGUAGE java " +
            "AS 'return Double.valueOf(Math.sin(input));'");
    Assert.assertEquals(1, Schema.instance.getFunctions(fSinName).size());

    preparedSelect1 = QueryProcessor.instance.prepare(
        String.format("SELECT key, %s(d) FROM %s.%s", fSin, KEYSPACE, currentTable()),
        ClientState.forInternalCalls());
    preparedInsert1 = QueryProcessor.instance.prepare(
        String.format("INSERT INTO %s.%s (key, d) VALUES (?, %s(?))", KEYSPACE, currentTable(), fSin),
        ClientState.forInternalCalls());
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));

    dropPerTestKeyspace();

    // Again, only the two statements referencing the function should be removed from the
    // cache: the statements select from tables in KEYSPACE, while only the function is
    // scoped to KEYSPACE_PER_TEST, which was just dropped.
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect2.statementId));
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert2.statementId));
}
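The repeated null/not-null checks above could be factored into a small helper. A hypothetical sketch (assertCached is not part of the original test; it reuses only the QueryProcessor calls shown above):

private static void assertCached(boolean expectCached, ResultMessage.Prepared... prepared)
{
    // A statement survives in the QueryProcessor cache iff getPrepared still
    // resolves its id; dropping a UDF it references evicts it.
    for (ResultMessage.Prepared p : prepared)
    {
        if (expectCached)
            Assert.assertNotNull(QueryProcessor.instance.getPrepared(p.statementId));
        else
            Assert.assertNull(QueryProcessor.instance.getPrepared(p.statementId));
    }
}

// Usage, mirroring the checks after the DROP FUNCTION above:
// assertCached(false, preparedSelect1, preparedInsert1);
// assertCached(true,  preparedSelect2, preparedInsert2);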
Use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
From the class UFTest, the method testBrokenFunction:
@Test
public void testBrokenFunction() throws Throwable
{
    createTable("CREATE TABLE %s (key int PRIMARY KEY, dval double)");
    execute("INSERT INTO %s (key, dval) VALUES (?, ?)", 1, 1d);
    String fName = createFunction(KEYSPACE_PER_TEST, "double",
                                  "CREATE OR REPLACE FUNCTION %s(val double) " +
                                  "RETURNS NULL ON NULL INPUT " +
                                  "RETURNS double " +
                                  "LANGUAGE JAVA\n" +
                                  "AS 'throw new RuntimeException();';");
    KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(KEYSPACE_PER_TEST);
    UDFunction f = (UDFunction) ksm.functions.get(parseFunctionName(fName)).iterator().next();
    // Replace the working function with a "broken" stub that fails on execution,
    // then verify that invoking it surfaces the configured error message.
    UDFunction broken = UDFunction.createBrokenFunction(f.name(), f.argNames(), f.argTypes(), f.returnType(),
                                                        true, "java", f.body(),
                                                        new InvalidRequestException("foo bar is broken"));
    Schema.instance.load(ksm.withSwapped(ksm.functions.without(f.name(), f.argTypes()).with(broken)));
    assertInvalidThrowMessage("foo bar is broken", InvalidRequestException.class,
                              "SELECT key, " + fName + "(dval) FROM %s");
}
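Because the broken stub is installed by swapping schema in place, a test that needed to restore the original behavior could reverse the swap the same way. A minimal sketch, using only the calls already shown in the snippet:

// Sketch: swap the original (working) definition back in, undoing the
// replacement above. The keyspace metadata is re-read because the earlier
// Schema.instance.load(...) replaced it.
KeyspaceMetadata current = Schema.instance.getKeyspaceMetadata(KEYSPACE_PER_TEST);
Schema.instance.load(current.withSwapped(current.functions.without(broken.name(), broken.argTypes())
                                                          .with(f)));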
Use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
From the class PartitionImplementationTest, the method testIter:
private void testIter(Supplier<Collection<? extends Unfiltered>> contentSupplier, Row staticRow)
{
    NavigableSet<Clusterable> sortedContent = new TreeSet<Clusterable>(metadata.comparator);
    sortedContent.addAll(contentSupplier.get());
    AbstractBTreePartition partition;
    try (UnfilteredRowIterator iter = new Util.UnfilteredSource(metadata, Util.dk("pk"), staticRow,
                                                                sortedContent.stream().map(x -> (Unfiltered) x).iterator()))
    {
        partition = ImmutableBTreePartition.create(iter);
    }

    ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("col", true));
    ColumnFilter cf = ColumnFilter.selectionBuilder().add(defCol).build();
    Function<? super Clusterable, ? extends Clusterable> colFilter =
        x -> x instanceof Row ? ((Row) x).filter(cf, metadata) : x;
    Slices slices = Slices.with(metadata.comparator,
                                Slice.make(clustering(KEY_RANGE / 4), clustering(KEY_RANGE * 3 / 4)));
    Slices multiSlices = makeSlices();

    // lastRow
    assertRowsEqual((Row) get(sortedContent.descendingSet(), x -> x instanceof Row), partition.lastRow());
    // get(static)
    assertRowsEqual(staticRow, partition.getRow(Clustering.STATIC_CLUSTERING));
    // get
    for (int i = 0; i < KEY_RANGE; ++i)
    {
        Clustering<?> cl = clustering(i);
        assertRowsEqual(getRow(sortedContent, cl), partition.getRow(cl));
    }
    // isEmpty
    assertEquals(sortedContent.isEmpty() && staticRow == null, partition.isEmpty());
    // hasRows
    assertEquals(sortedContent.stream().anyMatch(x -> x instanceof Row), partition.hasRows());
    // iterator
    assertIteratorsEqual(sortedContent.stream().filter(x -> x instanceof Row).iterator(), partition.iterator());
    // unfiltered iterator
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator());
    // unfiltered iterator with explicit filter and slices
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, false));
    // column-filtered
    assertIteratorsEqual(sortedContent.stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, false));
    // sliced
    assertIteratorsEqual(slice(sortedContent, slices.get(0)), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, slices.get(0))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, false));
    // randomly multi-sliced
    assertIteratorsEqual(slice(sortedContent, multiSlices), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, multiSlices)).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, false));
    // reversed
    assertIteratorsEqual(sortedContent.descendingIterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, true));
    assertIteratorsEqual(sortedContent.descendingSet().stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, true));
    assertIteratorsEqual(invert(slice(sortedContent, slices.get(0))), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, slices.get(0)))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, true));
    assertIteratorsEqual(invert(slice(sortedContent, multiSlices)), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, multiSlices))).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, true));
    // clustering iterator
    testClusteringsIterator(sortedContent, partition, ColumnFilter.all(metadata), false);
    testClusteringsIterator(sortedContent, partition, cf, false);
    testClusteringsIterator(sortedContent, partition, ColumnFilter.all(metadata), true);
    testClusteringsIterator(sortedContent, partition, cf, true);
    // sliceable iterator
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), false);
    testSlicingOfIterators(sortedContent, partition, cf, false);
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), true);
    testSlicingOfIterators(sortedContent, partition, cf, true);
}
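assertIteratorsEqual, slice, invert and streamOf are helpers defined elsewhere in PartitionImplementationTest. As a reference point, a minimal hypothetical version of the iterator comparison the assertions above rely on could look like this:

// Hypothetical sketch, not the test's actual helper: both iterators must
// yield equal elements in the same order and must be exhausted together.
private static <T> void assertIteratorsEqual(Iterator<? extends T> expected, Iterator<? extends T> actual)
{
    while (expected.hasNext())
    {
        assertTrue("actual iterator ended early", actual.hasNext());
        assertEquals(expected.next(), actual.next());
    }
    assertFalse("actual iterator has extra elements", actual.hasNext());
}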
Use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
From the class SSTableHeaderFixTest, the method buildFakeSSTable:
private File buildFakeSSTable(File dir, int generation, TableMetadata.Builder cols, Function<ColumnMetadata, ColumnMetadata> freezer)
{
    TableMetadata headerMetadata = cols.build();
    TableMetadata.Builder schemaCols = TableMetadata.builder("ks", "cf");
    for (ColumnMetadata cm : cols.columns())
        schemaCols.addColumn(freezer.apply(cm));
    tableMetadata = schemaCols.build();
    try
    {
        Descriptor desc = new Descriptor(version, dir, "ks", "cf", generation, SSTableFormat.Type.BIG);

        // Just create the component files - we don't really need those.
        for (Component component : requiredComponents)
            assertTrue(new File(desc.filenameFor(component)).createFileIfNotExists());

        AbstractType<?> partitionKey = headerMetadata.partitionKeyType;
        List<AbstractType<?>> clusteringKey = headerMetadata.clusteringColumns()
                                                            .stream()
                                                            .map(cd -> cd.type)
                                                            .collect(Collectors.toList());
        Map<ByteBuffer, AbstractType<?>> staticColumns = headerMetadata.columns()
                                                                       .stream()
                                                                       .filter(cd -> cd.kind == ColumnMetadata.Kind.STATIC)
                                                                       .collect(Collectors.toMap(cd -> cd.name.bytes, cd -> cd.type, (a, b) -> a));
        Map<ByteBuffer, AbstractType<?>> regularColumns = headerMetadata.columns()
                                                                        .stream()
                                                                        .filter(cd -> cd.kind == ColumnMetadata.Kind.REGULAR)
                                                                        .collect(Collectors.toMap(cd -> cd.name.bytes, cd -> cd.type, (a, b) -> a));
        File statsFile = new File(desc.filenameFor(Component.STATS));
        SerializationHeader.Component header = SerializationHeader.Component.buildComponentForTools(partitionKey,
                                                                                                    clusteringKey,
                                                                                                    staticColumns,
                                                                                                    regularColumns,
                                                                                                    EncodingStats.NO_STATS);
        try (SequentialWriter out = new SequentialWriter(statsFile))
        {
            desc.getMetadataSerializer().serialize(Collections.singletonMap(MetadataType.HEADER, header), out, version);
            out.finish();
        }
        return new File(desc.filenameFor(Component.DATA));
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }
}
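A hedged usage sketch: invoking the helper with an identity freezer, so the serialized header and the live schema agree and no fix-up is needed. The column names, types, and tempDir are illustrative assumptions, not taken from the original test:

// Illustrative invocation; the pk/val columns and tempDir are assumptions.
TableMetadata.Builder cols = TableMetadata.builder("ks", "cf")
                                          .addPartitionKeyColumn("pk", Int32Type.instance)
                                          .addRegularColumn("val", UTF8Type.instance);
// Identity freezer: every column is passed through unchanged.
File dataFile = buildFakeSSTable(tempDir, 1, cols, c -> c);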