use of io.reactivex.functions.Function in project RxJava by ReactiveX.
the class FlowableDebounceTest method debounceSelectorNormal1.
@Test
public void debounceSelectorNormal1() {
    PublishProcessor<Integer> source = PublishProcessor.create();
    final PublishProcessor<Integer> debouncer = PublishProcessor.create();
    Function<Integer, Flowable<Integer>> debounceSel = new Function<Integer, Flowable<Integer>>() {

        @Override
        public Flowable<Integer> apply(Integer t1) {
            // every upstream value is debounced by the same shared processor
            return debouncer;
        }
    };
    Subscriber<Object> subscriber = TestHelper.mockSubscriber();
    InOrder inOrder = inOrder(subscriber);
    source.debounce(debounceSel).subscribe(subscriber);
    source.onNext(1);
    // the debouncer fires, so the pending 1 is delivered
    debouncer.onNext(1);
    // 2 and 3 are superseded while the debouncer stays silent; 4 remains pending
    source.onNext(2);
    source.onNext(3);
    source.onNext(4);
    // the debouncer fires again, delivering the pending 4
    debouncer.onNext(2);
    source.onNext(5);
    // completion flushes the still-pending 5
    source.onComplete();
    inOrder.verify(subscriber).onNext(1);
    inOrder.verify(subscriber).onNext(4);
    inOrder.verify(subscriber).onNext(5);
    inOrder.verify(subscriber).onComplete();
    verify(subscriber, never()).onError(any(Throwable.class));
}
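For readers less familiar with the selector-based overload, here is a minimal sketch of the same contract using RxJava 2 lambda syntax and a TestSubscriber (illustrative only, not part of the RxJava test suite): a value is delivered once its debouncer Publisher fires, a newer value supersedes an undelivered one, and completion flushes whatever is still pending.

PublishProcessor<Integer> source = PublishProcessor.create();
PublishProcessor<Integer> debouncer = PublishProcessor.create();
TestSubscriber<Integer> ts = source.debounce(v -> debouncer).test();
source.onNext(1);
debouncer.onNext(1);    // debouncer fires: 1 is delivered
source.onNext(2);
source.onNext(3);       // 2 is superseded, 3 stays pending
source.onComplete();    // completion flushes the pending 3
ts.assertValues(1, 3);
ts.assertComplete();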
use of io.reactivex.functions.Function in project RxRelay by JakeWharton.
the class BehaviorRelayTest method testUnsubscriptionCase.
@Test(timeout = 1000)
public void testUnsubscriptionCase() {
    // FIXME was plain null which is not allowed
    BehaviorRelay<String> src = BehaviorRelay.createDefault("null");
    for (int i = 0; i < 10; i++) {
        final Observer<Object> o = TestHelper.mockObserver();
        InOrder inOrder = inOrder(o);
        String v = "" + i;
        src.accept(v);
        System.out.printf("Turn: %d%n", i);
        // the relay replays its latest value, so firstElement() immediately sees v
        src.firstElement().toObservable().flatMap(new Function<String, Observable<String>>() {

            @Override
            public Observable<String> apply(String t1) {
                return Observable.just(t1 + ", " + t1);
            }
        }).subscribe(new DefaultObserver<String>() {

            @Override
            public void onNext(String t) {
                o.onNext(t);
            }

            @Override
            public void onError(Throwable e) {
                o.onError(e);
            }

            @Override
            public void onComplete() {
                o.onComplete();
            }
        });
        inOrder.verify(o).onNext(v + ", " + v);
        inOrder.verify(o).onComplete();
        verify(o, never()).onError(any(Throwable.class));
    }
}
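The test relies on BehaviorRelay replaying its latest value: a fresh subscriber immediately receives the most recently accepted item, so firstElement() completes with it right away. A minimal illustrative sketch of that behaviour, using lambda syntax:

BehaviorRelay<String> relay = BehaviorRelay.createDefault("initial");
relay.accept("latest");
relay.firstElement()
     .toObservable()
     .flatMap(v -> Observable.just(v + ", " + v))   // same mapping as the anonymous Function above
     .subscribe(System.out::println);               // prints "latest, latest"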
use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
the class CreateViewStatement method announceMigration.
public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException {
    // We need to make sure that:
    //  - primary key includes all columns in base table's primary key
    //  - make sure that the select statement does not have anything other than columns
    //    and their names match the base table's names
    //  - make sure that primary key does not include any collections
    //  - make sure there is no where clause in the select statement
    //  - make sure there is not currently a table or view
    //  - make sure baseTable gcGraceSeconds > 0
    properties.validate();
    if (properties.useCompactStorage)
        throw new InvalidRequestException("Cannot use 'COMPACT STORAGE' when defining a materialized view");
    // We enforce the keyspace because if the RF is different, the logic to wait for a
    // specific replica would break
    if (!baseName.getKeyspace().equals(keyspace()))
        throw new InvalidRequestException("Cannot create a materialized view on a table in a separate keyspace");
    TableMetadata metadata = Schema.instance.validateTable(baseName.getKeyspace(), baseName.getColumnFamily());
    if (metadata.isCounter())
        throw new InvalidRequestException("Materialized views are not supported on counter tables");
    if (metadata.isView())
        throw new InvalidRequestException("Materialized views cannot be created against other materialized views");
    if (metadata.params.gcGraceSeconds == 0) {
        throw new InvalidRequestException(String.format("Cannot create materialized view '%s' for base table " + "'%s' with gc_grace_seconds of 0, since this value is " + "used to TTL undelivered updates. Setting gc_grace_seconds" + " too low might cause undelivered updates to expire " + "before being replayed.", cfName.getColumnFamily(), baseName.getColumnFamily()));
    }
    Set<ColumnIdentifier> included = Sets.newHashSetWithExpectedSize(selectClause.size());
    for (RawSelector selector : selectClause) {
        Selectable.Raw selectable = selector.selectable;
        if (selectable instanceof Selectable.WithFieldSelection.Raw)
            throw new InvalidRequestException("Cannot select out a part of type when defining a materialized view");
        if (selectable instanceof Selectable.WithFunction.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selectable instanceof Selectable.WritetimeOrTTL.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selector.alias != null)
            throw new InvalidRequestException("Cannot use alias when defining a materialized view");
        Selectable s = selectable.prepare(metadata);
        if (s instanceof Term.Raw)
            throw new InvalidRequestException("Cannot use terms in selection when defining a materialized view");
        ColumnMetadata cdef = (ColumnMetadata) s;
        included.add(cdef.name);
    }
    Set<ColumnMetadata.Raw> targetPrimaryKeys = new HashSet<>();
    for (ColumnMetadata.Raw identifier : Iterables.concat(partitionKeys, clusteringKeys)) {
        if (!targetPrimaryKeys.add(identifier))
            throw new InvalidRequestException("Duplicate entry found in PRIMARY KEY: " + identifier);
        ColumnMetadata cdef = identifier.prepare(metadata);
        if (cdef.type.isMultiCell())
            throw new InvalidRequestException(String.format("Cannot use MultiCell column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.isStatic())
            throw new InvalidRequestException(String.format("Cannot use Static column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.type instanceof DurationType)
            throw new InvalidRequestException(String.format("Cannot use Duration column '%s' in PRIMARY KEY of materialized view", identifier));
    }
    // build the select statement
    Map<ColumnMetadata.Raw, Boolean> orderings = Collections.emptyMap();
    List<ColumnMetadata.Raw> groups = Collections.emptyList();
    SelectStatement.Parameters parameters = new SelectStatement.Parameters(orderings, groups, false, true, false);
    SelectStatement.RawStatement rawSelect = new SelectStatement.RawStatement(baseName, parameters, selectClause, whereClause, null, null);
    ClientState state = ClientState.forInternalCalls();
    state.setKeyspace(keyspace());
    rawSelect.prepareKeyspace(state);
    rawSelect.setBoundVariables(getBoundVariables());
    ParsedStatement.Prepared prepared = rawSelect.prepare(true);
    SelectStatement select = (SelectStatement) prepared.statement;
    StatementRestrictions restrictions = select.getRestrictions();
    if (!prepared.boundNames.isEmpty())
        throw new InvalidRequestException("Cannot use query parameters in CREATE MATERIALIZED VIEW statements");
    String whereClauseText = View.relationsToWhereClause(whereClause.relations);
    Set<ColumnIdentifier> basePrimaryKeyCols = new HashSet<>();
    for (ColumnMetadata definition : Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns()))
        basePrimaryKeyCols.add(definition.name);
    List<ColumnIdentifier> targetClusteringColumns = new ArrayList<>();
    List<ColumnIdentifier> targetPartitionKeys = new ArrayList<>();
    // This is only used as an intermediate state; this is to catch whether multiple non-PK columns are used
    boolean hasNonPKColumn = false;
    for (ColumnMetadata.Raw raw : partitionKeys)
        hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetPartitionKeys, restrictions);
    for (ColumnMetadata.Raw raw : clusteringKeys)
        hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetClusteringColumns, restrictions);
    // We need to include all of the primary key columns from the base table in order to make sure that we do not
    // overwrite values in the view. We cannot support "collapsing" the base table into a smaller number of rows in
    // the view because if we need to generate a tombstone, we have no way of knowing which value is currently being
    // used in the view and whether or not to generate a tombstone. In order to not surprise our users, we require
    // that they include all of the columns. We provide them with a list of all of the columns left to include.
    boolean missingClusteringColumns = false;
    StringBuilder columnNames = new StringBuilder();
    List<ColumnIdentifier> includedColumns = new ArrayList<>();
    for (ColumnMetadata def : metadata.columns()) {
        ColumnIdentifier identifier = def.name;
        boolean includeDef = included.isEmpty() || included.contains(identifier);
        if (includeDef && def.isStatic()) {
            throw new InvalidRequestException(String.format("Unable to include static column '%s' which would be included by Materialized View SELECT * statement", identifier));
        }
        boolean defInTargetPrimaryKey = targetClusteringColumns.contains(identifier) || targetPartitionKeys.contains(identifier);
        if (includeDef && !defInTargetPrimaryKey) {
            includedColumns.add(identifier);
        }
        if (!def.isPrimaryKeyColumn())
            continue;
        if (!defInTargetPrimaryKey) {
            if (missingClusteringColumns)
                columnNames.append(',');
            else
                missingClusteringColumns = true;
            columnNames.append(identifier);
        }
    }
    if (missingClusteringColumns)
        throw new InvalidRequestException(String.format("Cannot create Materialized View %s without primary key columns from base %s (%s)", columnFamily(), baseName.getColumnFamily(), columnNames.toString()));
    if (targetPartitionKeys.isEmpty())
        throw new InvalidRequestException("Must select at least a column for a Materialized View");
    if (targetClusteringColumns.isEmpty())
        throw new InvalidRequestException("No columns are defined for Materialized View other than primary key");
    TableParams params = properties.properties.asNewTableParams();
    if (params.defaultTimeToLive > 0) {
        throw new InvalidRequestException("Cannot set default_time_to_live for a materialized view. " + "Data in a materialized view always expire at the same time than " + "the corresponding data in the parent table.");
    }
    TableMetadata.Builder builder = TableMetadata.builder(keyspace(), columnFamily(), properties.properties.getId()).isView(true).params(params);
    add(metadata, targetPartitionKeys, builder::addPartitionKeyColumn);
    add(metadata, targetClusteringColumns, builder::addClusteringColumn);
    add(metadata, includedColumns, builder::addRegularColumn);
    ViewMetadata definition = new ViewMetadata(keyspace(), columnFamily(), metadata.id, metadata.name, included.isEmpty(), rawSelect, whereClauseText, builder.build());
    try {
        MigrationManager.announceNewView(definition, isLocalOnly);
        return new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
    } catch (AlreadyExistsException e) {
        if (ifNotExists)
            return null;
        throw e;
    }
}
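For context, here is a hedged CQLTester-style sketch (the table, view, and column names are invented, and the IS NOT NULL restrictions reflect the general CQL requirement for view primary key columns rather than a check shown in this method) of a view definition that satisfies the validations above: same keyspace as the base table, a plain column selection with no functions, aliases, or terms, and every base primary key column carried into the view's primary key.

createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k, c))");
// hypothetical view name; the view keeps both base primary key columns (k, c) and adds v
execute("CREATE MATERIALIZED VIEW " + KEYSPACE + ".mv_by_v AS " +
        "SELECT k, c, v FROM %s " +
        "WHERE v IS NOT NULL AND k IS NOT NULL AND c IS NOT NULL " +
        "PRIMARY KEY (v, k, c)");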
use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
the class DropAggregateStatement method announceMigration.
public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException {
    Collection<Function> olds = Schema.instance.getFunctions(functionName);
    if (!argsPresent && olds != null && olds.size() > 1)
        throw new InvalidRequestException(String.format("'DROP AGGREGATE %s' matches multiple function definitions; " + "specify the argument types by issuing a statement like " + "'DROP AGGREGATE %s (type, type, ...)'. Hint: use cqlsh " + "'DESCRIBE AGGREGATE %s' command to find all overloads", functionName, functionName, functionName));
    Function old = null;
    if (argsPresent) {
        if (Schema.instance.getKeyspaceMetadata(functionName.keyspace) != null) {
            List<AbstractType<?>> argTypes = new ArrayList<>(argRawTypes.size());
            for (CQL3Type.Raw rawType : argRawTypes)
                argTypes.add(prepareType("arguments", rawType));
            old = Schema.instance.findFunction(functionName, argTypes).orElse(null);
        }
        if (old == null || !(old instanceof AggregateFunction)) {
            if (ifExists)
                return null;
            // just build a nicer error message
            StringBuilder sb = new StringBuilder();
            for (CQL3Type.Raw rawType : argRawTypes) {
                if (sb.length() > 0)
                    sb.append(", ");
                sb.append(rawType);
            }
            throw new InvalidRequestException(String.format("Cannot drop non existing aggregate '%s(%s)'", functionName, sb));
        }
    } else {
        if (olds == null || olds.isEmpty() || !(olds.iterator().next() instanceof AggregateFunction)) {
            if (ifExists)
                return null;
            throw new InvalidRequestException(String.format("Cannot drop non existing aggregate '%s'", functionName));
        }
        old = olds.iterator().next();
    }
    if (old.isNative())
        throw new InvalidRequestException(String.format("Cannot drop aggregate '%s' because it is a " + "native (built-in) function", functionName));
    MigrationManager.announceAggregateDrop((UDAggregate) old, isLocalOnly);
    return new Event.SchemaChange(Event.SchemaChange.Change.DROPPED, Event.SchemaChange.Target.AGGREGATE, old.name().keyspace, old.name().name, AbstractType.asCQLTypeStringList(old.argTypes()));
}
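The first error message above points at how the statement expects overloads to be disambiguated. A hedged sketch (the aggregate name and signature are invented) of the three forms a client might issue:

// ambiguous if my_avg has several overloads: triggers the "matches multiple function definitions" error
execute("DROP AGGREGATE " + KEYSPACE + ".my_avg");
// unambiguous: the argument types select a single overload
execute("DROP AGGREGATE " + KEYSPACE + ".my_avg(int)");
// with IF EXISTS, a missing aggregate is not an error (announceMigration returns null)
execute("DROP AGGREGATE IF EXISTS " + KEYSPACE + ".my_avg(int)");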
use of org.apache.cassandra.cql3.functions.Function in project cassandra by apache.
the class AggregationTest method testBrokenAggregate.
@Test
public void testBrokenAggregate() throws Throwable {
    createTable("CREATE TABLE %s (key int primary key, val int)");
    execute("INSERT INTO %s (key, val) VALUES (?, ?)", 1, 1);
    String fState = createFunction(KEYSPACE, "int, int", "CREATE FUNCTION %s(a int, b int) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE javascript " + "AS 'a + b;'");
    String a = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fState) + " " + "STYPE int ");
    KeyspaceMetadata ksm = Schema.instance.getKeyspaceMetadata(keyspace());
    UDAggregate f = (UDAggregate) ksm.functions.get(parseFunctionName(a)).iterator().next();
    // swap the real aggregate for a "broken" placeholder so that any query using it fails
    UDAggregate broken = UDAggregate.createBroken(f.name(), f.argTypes(), f.returnType(), null, new InvalidRequestException("foo bar is broken"));
    Schema.instance.load(ksm.withSwapped(ksm.functions.without(f.name(), f.argTypes()).with(broken)));
    assertInvalidThrowMessage("foo bar is broken", InvalidRequestException.class, "SELECT " + a + "(val) FROM %s");
}