
Example 16 with Function

use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.

the class UFAuthTest method functionResource.

private FunctionResource functionResource(String functionName) {
    // Note that this is somewhat brittle, as it assumes function names are truly unique;
    // it will break in the face of overloading. It is here to avoid having to duplicate
    // the functionality of CqlParser for transforming CQL types into AbstractTypes.
    FunctionName fn = parseFunctionName(functionName);
    Collection<Function> functions = Schema.instance.getFunctions(fn);
    assertEquals(String.format("Expected a single function definition for %s, but found %s", functionName, functions.size()), 1, functions.size());
    return FunctionResource.function(fn.keyspace, fn.name, functions.iterator().next().argTypes());
}
Also used : FunctionName(org.apache.cassandra.cql3.functions.FunctionName) Function(org.apache.cassandra.cql3.functions.Function)
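
The helper above pins a FunctionResource to one specific overload. For context, the FunctionResource factories form a small hierarchy: all functions, all functions in a keyspace, and a single overload identified by keyspace, name and argument types. The sketch below is illustrative only; the keyspace "ks" and function "sin" are hypothetical, and it assumes the same cassandra-all classes used by the test are on the classpath.

import java.util.Collections;

import org.apache.cassandra.auth.FunctionResource;
import org.apache.cassandra.db.marshal.DoubleType;

public class FunctionResourceSketch {
    public static void main(String[] args) {
        // The whole functions hierarchy.
        FunctionResource all = FunctionResource.root();
        // Every function in one keyspace.
        FunctionResource inKeyspace = FunctionResource.keyspace("ks");
        // A single overload, identified by keyspace, name and argument types:
        // exactly what the functionResource() helper above returns.
        FunctionResource oneOverload = FunctionResource.function(
                "ks", "sin", Collections.singletonList(DoubleType.instance));

        System.out.println(all.getName());
        System.out.println(inKeyspace.getName());
        System.out.println(oneOverload.getName());
    }
}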

Example 17 with Function

use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.

the class UFTest method testFunctionDropPreparedStatement.

@Test
public void testFunctionDropPreparedStatement() throws Throwable {
    createTable("CREATE TABLE %s (key int PRIMARY KEY, d double)");
    String fSin = createFunction(KEYSPACE_PER_TEST, "double", "CREATE FUNCTION %s ( input double ) " + "CALLED ON NULL INPUT " + "RETURNS double " + "LANGUAGE java " + "AS 'return Double.valueOf(Math.sin(input.doubleValue()));'");
    FunctionName fSinName = parseFunctionName(fSin);
    Assert.assertEquals(1, Schema.instance.getFunctions(fSinName).size());
    // Create pairs of SELECT and INSERT statements. One statement in each pair uses the function,
    // so when we drop it those statements should be removed from the prepared statement cache in
    // QueryProcessor. The other statements should be unaffected.
    ResultMessage.Prepared preparedSelect1 = QueryProcessor.instance.prepare(String.format("SELECT key, %s(d) FROM %s.%s", fSin, KEYSPACE, currentTable()), ClientState.forInternalCalls());
    ResultMessage.Prepared preparedSelect2 = QueryProcessor.instance.prepare(String.format("SELECT key FROM %s.%s", KEYSPACE, currentTable()), ClientState.forInternalCalls());
    ResultMessage.Prepared preparedInsert1 = QueryProcessor.instance.prepare(String.format("INSERT INTO %s.%s (key, d) VALUES (?, %s(?))", KEYSPACE, currentTable(), fSin), ClientState.forInternalCalls());
    ResultMessage.Prepared preparedInsert2 = QueryProcessor.instance.prepare(String.format("INSERT INTO %s.%s (key, d) VALUES (?, ?)", KEYSPACE, currentTable()), ClientState.forInternalCalls());
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect2.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert2.statementId));
    execute("DROP FUNCTION " + fSin + "(double);");
    // The statements which use the dropped function should be removed from the cache; the others remain unaffected.
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect2.statementId));
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert2.statementId));
    execute("CREATE FUNCTION " + fSin + " ( input double ) " + "RETURNS NULL ON NULL INPUT " + "RETURNS double " + "LANGUAGE java " + "AS 'return Double.valueOf(Math.sin(input));'");
    Assert.assertEquals(1, Schema.instance.getFunctions(fSinName).size());
    preparedSelect1 = QueryProcessor.instance.prepare(String.format("SELECT key, %s(d) FROM %s.%s", fSin, KEYSPACE, currentTable()), ClientState.forInternalCalls());
    preparedInsert1 = QueryProcessor.instance.prepare(String.format("INSERT INTO %s.%s (key, d) VALUES (?, %s(?))", KEYSPACE, currentTable(), fSin), ClientState.forInternalCalls());
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));
    dropPerTestKeyspace();
    // Again, only the two statements referencing the function should be removed from the cache.
    // This time it is because dropping KEYSPACE_PER_TEST drops the function, which is scoped to it,
    // while the statements select from tables in KEYSPACE, which is unaffected.
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedSelect1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedSelect2.statementId));
    Assert.assertNull(QueryProcessor.instance.getPrepared(preparedInsert1.statementId));
    Assert.assertNotNull(QueryProcessor.instance.getPrepared(preparedInsert2.statementId));
}
Also used : FunctionName(org.apache.cassandra.cql3.functions.FunctionName) ResultMessage(org.apache.cassandra.transport.messages.ResultMessage) Test(org.junit.Test)
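
The four assert pairs around the DROP repeat the same cache check. Under the same assumptions as the test (a CQLTester-style environment where QueryProcessor.instance is initialised), the pattern can be factored into a small helper; the class and method names below are hypothetical, but every call appears in the test above.

import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.transport.messages.ResultMessage;
import org.junit.Assert;

final class PreparedCacheAssertions {
    // A statement referencing the dropped function should no longer be in the prepared cache.
    static void assertEvicted(ResultMessage.Prepared prepared) {
        Assert.assertNull(QueryProcessor.instance.getPrepared(prepared.statementId));
    }

    // A statement that does not reference the dropped function should stay cached.
    static void assertStillCached(ResultMessage.Prepared prepared) {
        Assert.assertNotNull(QueryProcessor.instance.getPrepared(prepared.statementId));
    }
}

With these helpers the post-DROP block reduces to assertEvicted(preparedSelect1), assertStillCached(preparedSelect2), and so on.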

Example 18 with Function

use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.

the class UFTest method testBrokenFunction.

@Test
public void testBrokenFunction() throws Throwable {
    createTable("CREATE TABLE %s (key int primary key, dval double)");
    execute("INSERT INTO %s (key, dval) VALUES (?, ?)", 1, 1d);
    String fName = createFunction(KEYSPACE_PER_TEST, "double", "CREATE OR REPLACE FUNCTION %s(val double) " + "RETURNS NULL ON NULL INPUT " + "RETURNS double " + "LANGUAGE JAVA\n" + "AS 'throw new RuntimeException();';");
    KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(KEYSPACE_PER_TEST);
    UDFunction f = (UDFunction) ksm.functions.get(parseFunctionName(fName)).iterator().next();
    UDFunction broken = UDFunction.createBrokenFunction(f.name(), f.argNames(), f.argTypes(), f.returnType(), true, "java", f.body(), new InvalidRequestException("foo bar is broken"));
    Schema.instance.load(ksm.withSwapped(ksm.functions.without(f.name(), f.argTypes()).with(broken)));
    assertInvalidThrowMessage("foo bar is broken", InvalidRequestException.class, "SELECT key, " + fName + "(dval) FROM %s");
}
Also used : InvalidRequestException(org.apache.cassandra.exceptions.InvalidRequestException) KeyspaceMetadata(org.apache.cassandra.schema.KeyspaceMetadata) JavaBasedUDFunction(org.apache.cassandra.cql3.functions.JavaBasedUDFunction) UDFunction(org.apache.cassandra.cql3.functions.UDFunction) Test(org.junit.Test)

Example 19 with Function

use of java.util.function.Function in project cassandra by apache.

the class PartitionImplementationTest method testIter.

private void testIter(Supplier<Collection<? extends Unfiltered>> contentSupplier, Row staticRow) {
    NavigableSet<Clusterable> sortedContent = new TreeSet<Clusterable>(metadata.comparator);
    sortedContent.addAll(contentSupplier.get());
    AbstractBTreePartition partition;
    try (UnfilteredRowIterator iter = new Util.UnfilteredSource(metadata, Util.dk("pk"), staticRow, sortedContent.stream().map(x -> (Unfiltered) x).iterator())) {
        partition = ImmutableBTreePartition.create(iter);
    }
    ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("col", true));
    ColumnFilter cf = ColumnFilter.selectionBuilder().add(defCol).build();
    Function<? super Clusterable, ? extends Clusterable> colFilter = x -> x instanceof Row ? ((Row) x).filter(cf, metadata) : x;
    Slices slices = Slices.with(metadata.comparator, Slice.make(clustering(KEY_RANGE / 4), clustering(KEY_RANGE * 3 / 4)));
    Slices multiSlices = makeSlices();
    // lastRow
    assertRowsEqual((Row) get(sortedContent.descendingSet(), x -> x instanceof Row), partition.lastRow());
    // get(static)
    assertRowsEqual(staticRow, partition.getRow(Clustering.STATIC_CLUSTERING));
    // get
    for (int i = 0; i < KEY_RANGE; ++i) {
        Clustering<?> cl = clustering(i);
        assertRowsEqual(getRow(sortedContent, cl), partition.getRow(cl));
    }
    // isEmpty
    assertEquals(sortedContent.isEmpty() && staticRow == null, partition.isEmpty());
    // hasRows
    assertEquals(sortedContent.stream().anyMatch(x -> x instanceof Row), partition.hasRows());
    // iterator
    assertIteratorsEqual(sortedContent.stream().filter(x -> x instanceof Row).iterator(), partition.iterator());
    // unfiltered iterator
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator());
    // unfiltered iterator with explicit column filter and slices
    assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, false));
    // column-filtered
    assertIteratorsEqual(sortedContent.stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, false));
    // sliced
    assertIteratorsEqual(slice(sortedContent, slices.get(0)), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, slices.get(0))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, false));
    // randomly multi-sliced
    assertIteratorsEqual(slice(sortedContent, multiSlices), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, false));
    assertIteratorsEqual(streamOf(slice(sortedContent, multiSlices)).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, false));
    // reversed
    assertIteratorsEqual(sortedContent.descendingIterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, true));
    assertIteratorsEqual(sortedContent.descendingSet().stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, true));
    assertIteratorsEqual(invert(slice(sortedContent, slices.get(0))), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, slices.get(0)))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, true));
    assertIteratorsEqual(invert(slice(sortedContent, multiSlices)), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, true));
    assertIteratorsEqual(streamOf(invert(slice(sortedContent, multiSlices))).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, true));
    // clustering iterator
    testClusteringsIterator(sortedContent, partition, ColumnFilter.all(metadata), false);
    testClusteringsIterator(sortedContent, partition, cf, false);
    testClusteringsIterator(sortedContent, partition, ColumnFilter.all(metadata), true);
    testClusteringsIterator(sortedContent, partition, cf, true);
    // sliceable iter
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), false);
    testSlicingOfIterators(sortedContent, partition, cf, false);
    testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), true);
    testSlicingOfIterators(sortedContent, partition, cf, true);
}
Also used : AbstractBTreePartition(org.apache.cassandra.db.partitions.AbstractBTreePartition) java.util(java.util) Iterables(com.google.common.collect.Iterables) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) BeforeClass(org.junit.BeforeClass) org.apache.cassandra.db(org.apache.cassandra.db) Deletion(org.apache.cassandra.db.rows.Row.Deletion) Function(java.util.function.Function) Supplier(java.util.function.Supplier) ByteBuffer(java.nio.ByteBuffer) Iterators(com.google.common.collect.Iterators) org.apache.cassandra.db.rows(org.apache.cassandra.db.rows) Partition(org.apache.cassandra.db.partitions.Partition) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) ImmutableBTreePartition(org.apache.cassandra.db.partitions.ImmutableBTreePartition) StreamSupport(java.util.stream.StreamSupport) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Predicate(java.util.function.Predicate) Util(org.apache.cassandra.Util) ByteBufferUtil(org.apache.cassandra.utils.ByteBufferUtil) KeyspaceParams(org.apache.cassandra.schema.KeyspaceParams) Test(org.junit.Test) Collectors(java.util.stream.Collectors) SchemaLoader(org.apache.cassandra.SchemaLoader) Stream(java.util.stream.Stream) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) TableMetadata(org.apache.cassandra.schema.TableMetadata) Assert(org.junit.Assert) AsciiType(org.apache.cassandra.db.marshal.AsciiType) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) AbstractBTreePartition(org.apache.cassandra.db.partitions.AbstractBTreePartition) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier)
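
The colFilter lambda is where the java.util.function.Function type comes in: it projects each Row through a single-column ColumnFilter while letting other Unfiltered items pass through untouched. Factored out under the same assumptions as the test (a TableMetadata containing a regular column named "col"), it might look like the sketch below; the class name is hypothetical.

import java.util.function.Function;

import org.apache.cassandra.cql3.ColumnIdentifier;
import org.apache.cassandra.db.Clusterable;
import org.apache.cassandra.db.filter.ColumnFilter;
import org.apache.cassandra.db.rows.Row;
import org.apache.cassandra.schema.ColumnMetadata;
import org.apache.cassandra.schema.TableMetadata;

final class RowProjection {
    // Builds a transform that keeps only the "col" column of each row and leaves
    // non-row elements (e.g. range tombstone markers) unchanged.
    static Function<Clusterable, Clusterable> select(TableMetadata metadata) {
        ColumnMetadata col = metadata.getColumn(new ColumnIdentifier("col", true));
        ColumnFilter cf = ColumnFilter.selectionBuilder().add(col).build();
        return x -> x instanceof Row ? ((Row) x).filter(cf, metadata) : x;
    }
}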

Example 20 with Function

use of java.util.function.Function in project cassandra by apache.

the class SSTableHeaderFixTest method buildFakeSSTable.

private File buildFakeSSTable(File dir, int generation, TableMetadata.Builder cols, Function<ColumnMetadata, ColumnMetadata> freezer) {
    TableMetadata headerMetadata = cols.build();
    TableMetadata.Builder schemaCols = TableMetadata.builder("ks", "cf");
    for (ColumnMetadata cm : cols.columns()) schemaCols.addColumn(freezer.apply(cm));
    tableMetadata = schemaCols.build();
    try {
        Descriptor desc = new Descriptor(version, dir, "ks", "cf", generation, SSTableFormat.Type.BIG);
        // Just create the component files - we don't really need those.
        for (Component component : requiredComponents) assertTrue(new File(desc.filenameFor(component)).createFileIfNotExists());
        AbstractType<?> partitionKey = headerMetadata.partitionKeyType;
        List<AbstractType<?>> clusteringKey = headerMetadata.clusteringColumns().stream().map(cd -> cd.type).collect(Collectors.toList());
        Map<ByteBuffer, AbstractType<?>> staticColumns = headerMetadata.columns().stream().filter(cd -> cd.kind == ColumnMetadata.Kind.STATIC).collect(Collectors.toMap(cd -> cd.name.bytes, cd -> cd.type, (a, b) -> a));
        Map<ByteBuffer, AbstractType<?>> regularColumns = headerMetadata.columns().stream().filter(cd -> cd.kind == ColumnMetadata.Kind.REGULAR).collect(Collectors.toMap(cd -> cd.name.bytes, cd -> cd.type, (a, b) -> a));
        File statsFile = new File(desc.filenameFor(Component.STATS));
        SerializationHeader.Component header = SerializationHeader.Component.buildComponentForTools(partitionKey, clusteringKey, staticColumns, regularColumns, EncodingStats.NO_STATS);
        try (SequentialWriter out = new SequentialWriter(statsFile)) {
            desc.getMetadataSerializer().serialize(Collections.singletonMap(MetadataType.HEADER, header), out, version);
            out.finish();
        }
        return new File(desc.filenameFor(Component.DATA));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) Arrays(java.util.Arrays) IndexTarget(org.apache.cassandra.cql3.statements.schema.IndexTarget) File(org.apache.cassandra.io.util.File) AbstractType(org.apache.cassandra.db.marshal.AbstractType) ByteBuffer(java.nio.ByteBuffer) Matcher(java.util.regex.Matcher) Map(java.util.Map) After(org.junit.After) SSTableFormat(org.apache.cassandra.io.sstable.format.SSTableFormat) Assert.fail(org.junit.Assert.fail) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) CollectionType(org.apache.cassandra.db.marshal.CollectionType) CompositeType(org.apache.cassandra.db.marshal.CompositeType) SequentialWriter(org.apache.cassandra.io.util.SequentialWriter) Version(org.apache.cassandra.io.sstable.format.Version) FBUtilities(org.apache.cassandra.utils.FBUtilities) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) Set(java.util.Set) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) List(java.util.List) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) FloatType(org.apache.cassandra.db.marshal.FloatType) FileUtils(org.apache.cassandra.io.util.FileUtils) Assert.assertFalse(org.junit.Assert.assertFalse) FrozenType(org.apache.cassandra.db.marshal.FrozenType) TableMetadata(org.apache.cassandra.schema.TableMetadata) Pattern(java.util.regex.Pattern) IntStream(java.util.stream.IntStream) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) SetType(org.apache.cassandra.db.marshal.SetType) FieldIdentifier(org.apache.cassandra.cql3.FieldIdentifier) Function(java.util.function.Function) HashSet(java.util.HashSet) Int32Type(org.apache.cassandra.db.marshal.Int32Type) ListType(org.apache.cassandra.db.marshal.ListType) UTF8Type(org.apache.cassandra.db.marshal.UTF8Type) AbstractCompositeType(org.apache.cassandra.db.marshal.AbstractCompositeType) TupleType(org.apache.cassandra.db.marshal.TupleType) BigFormat(org.apache.cassandra.io.sstable.format.big.BigFormat) SerializationHeader(org.apache.cassandra.db.SerializationHeader) Before(org.junit.Before) IndexMetadata(org.apache.cassandra.schema.IndexMetadata) ByteBufferUtil(org.apache.cassandra.utils.ByteBufferUtil) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) MapType(org.apache.cassandra.db.marshal.MapType) EncodingStats(org.apache.cassandra.db.rows.EncodingStats) Assert(org.junit.Assert) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) UserType(org.apache.cassandra.db.marshal.UserType) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) SequentialWriter(org.apache.cassandra.io.util.SequentialWriter) ByteBuffer(java.nio.ByteBuffer) SerializationHeader(org.apache.cassandra.db.SerializationHeader) AbstractType(org.apache.cassandra.db.marshal.AbstractType) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) File(org.apache.cassandra.io.util.File)
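
The freezer parameter is a plain java.util.function.Function applied to every column before the schema is registered, so the registered TableMetadata can deliberately differ from the header serialized into the fake STATS component. A minimal usage sketch, with illustrative names only:

import java.util.function.Function;

import org.apache.cassandra.schema.ColumnMetadata;

final class FreezerSketch {
    // Identity keeps the registered schema identical to the serialized header.
    static final Function<ColumnMetadata, ColumnMetadata> KEEP_AS_IS = Function.identity();

    // A real test would return an altered copy of each column here (for example one whose
    // collection or UDT type is frozen) to provoke the header/schema mismatch being repaired.
    static final Function<ColumnMetadata, ColumnMetadata> PLACEHOLDER = cm -> cm;
}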

Aggregations

Test (org.junit.Test): 34
Function (io.reactivex.rxjava3.functions.Function): 20
FunctionName (org.apache.cassandra.cql3.functions.FunctionName): 13
ByteBuffer (java.nio.ByteBuffer): 7
Function (org.apache.cassandra.cql3.functions.Function): 7
AbstractType (org.apache.cassandra.db.marshal.AbstractType): 6
InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException): 6
KeyspaceMetadata (org.apache.cassandra.schema.KeyspaceMetadata): 6
TableMetadata (org.apache.cassandra.schema.TableMetadata): 6
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 5
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 5
ProtocolVersion (org.apache.cassandra.transport.ProtocolVersion): 5
InOrder (org.mockito.InOrder): 5
List (java.util.List): 4
UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet): 4
UDFunction (org.apache.cassandra.cql3.functions.UDFunction): 4
ViewMetadata (org.apache.cassandra.schema.ViewMetadata): 4
TestException (io.reactivex.rxjava3.exceptions.TestException): 3
java.util (java.util): 3
ArrayList (java.util.ArrayList): 3