Example usage of org.apache.cassandra.db.marshal.AbstractType in the Apache Cassandra project:
the apply method of the class DropAggregateStatement.
/**
 * Applies a DROP AGGREGATE statement to the given schema and returns the updated schema.
 *
 * Honours IF EXISTS semantics: when the keyspace or the aggregate is missing and
 * {@code ifExists} is set, the schema is returned unchanged instead of throwing.
 *
 * @param schema the current set of keyspaces
 * @return the schema with the matching aggregate removed
 */
public Keyspaces apply(Keyspaces schema) {
    // Human-readable name used in error messages; includes argument types only when
    // they were specified in the statement.
    String name = argumentsSpeficied
                ? format("%s.%s(%s)", keyspaceName, aggregateName, join(", ", transform(arguments, CQL3Type.Raw::toString)))
                : format("%s.%s", keyspaceName, aggregateName);

    KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
    if (keyspace == null) {
        if (ifExists)
            return schema;
        throw ire("Aggregate '%s' doesn't exist", name);
    }

    Collection<Function> overloads = keyspace.functions.get(new FunctionName(keyspaceName, aggregateName));

    // Without explicit argument types we cannot disambiguate between overloads.
    if (!argumentsSpeficied && overloads.size() > 1) {
        throw ire("'DROP AGGREGATE %s' matches multiple function definitions; " + "specify the argument types by issuing a statement like " + "'DROP AGGREGATE %s (type, type, ...)'. You can use cqlsh " + "'DESCRIBE AGGREGATE %s' command to find all overloads", aggregateName, aggregateName, aggregateName);
    }

    // Reject frozen<> argument types; tuples are exempt (they are implicitly frozen).
    arguments.forEach(raw -> {
        if (!raw.isTuple() && raw.isFrozen())
            throw ire("Argument '%s' cannot be frozen; remove frozen<> modifier from '%s'", raw, raw);
    });

    List<AbstractType<?>> argumentTypes = prepareArgumentTypes(keyspace.types);

    // Match only UDAs; when argument types are given, require them to match too.
    Predicate<Function> isMatch = Functions.Filter.UDA;
    if (argumentsSpeficied)
        isMatch = isMatch.and(f -> Functions.typesMatch(f.argTypes(), argumentTypes));

    Function aggregate = overloads.stream().filter(isMatch).findAny().orElse(null);
    if (aggregate == null) {
        if (ifExists)
            return schema;
        throw ire("Aggregate '%s' doesn't exist", name);
    }

    return schema.withAddedOrUpdated(keyspace.withSwapped(keyspace.functions.without(aggregate)));
}
Example usage of org.apache.cassandra.db.marshal.AbstractType in the Apache Cassandra project:
the createColumnDefinition method of the class CassandraGenerators.
/**
 * Generates a random column definition of the requested kind for the given table.
 *
 * @param ks                 keyspace name
 * @param table              table name
 * @param kind               column kind (partition key, clustering, regular, ...)
 * @param createdColumnNames names already in use; mutated here to record the new name
 *                           (side effect outside of normal random generation)
 * @param rnd                randomness source driving the generators
 * @return a freshly generated ColumnMetadata with a unique name
 */
private static ColumnMetadata createColumnDefinition(String ks, String table, ColumnMetadata.Kind kind, Set<String> createdColumnNames, /* This is mutated to check for collisions, so has a side effect outside of normal random generation */
RandomnessSource rnd) {
    Gen<AbstractType<?>> typeGen = AbstractTypeGenerators.typeGen();
    // Primary-key columns must use frozen types, and the empty type is not supported
    // there, so filter it out.
    if (kind == ColumnMetadata.Kind.PARTITION_KEY || kind == ColumnMetadata.Kind.CLUSTERING) {
        typeGen = Generators.filter(typeGen, t -> t != EmptyType.instance).map(AbstractType::freeze);
    }
    if (kind == ColumnMetadata.Kind.CLUSTERING) {
        // when working on a clustering column, add in reversed types periodically
        typeGen = allowReversed(typeGen);
    }
    // Draw candidate names until an unused one is found; adding it to the set both
    // checks for and records the collision.
    String columnName = IDENTIFIER_GEN.generate(rnd);
    while (!createdColumnNames.add(columnName)) {
        columnName = IDENTIFIER_GEN.generate(rnd);
    }
    ColumnIdentifier name = new ColumnIdentifier(columnName, true);
    // Primary-key columns get a random position; all other kinds use -1.
    int position = kind.isPrimaryKeyKind() ? (int) rnd.next(Constraint.between(0, 30)) : -1;
    return new ColumnMetadata(ks, table, name, typeGen.generate(rnd), position, kind);
}
Example usage of org.apache.cassandra.db.marshal.AbstractType in the Apache Cassandra project:
the variousDroppedUserTypes method of the class SSTableHeaderFixTest.
/**
 * Exercises SSTableHeaderFix against a mix of frozen/unfrozen UDT and tuple columns,
 * both live and dropped, and verifies that exactly the columns flagged as needing a
 * fix ({@code mustFix}) are rewritten in the sstable's serialization header.
 */
@Test
public void variousDroppedUserTypes() throws Exception {
File dir = temporaryFolder;
TableMetadata.Builder cols = TableMetadata.builder("ks", "cf").addPartitionKeyColumn("pk", udtPK).addClusteringColumn("ck", udtCK);
// Each ColSpec describes one regular column: its name, the type initially written
// into the sstable header (preFix), the type the fixed header should contain
// (expect), whether the column is dropped, and whether the header must be fixed.
// NOTE(review): the dropped entries pass an extra leading type argument — presumably
// a distinct header type for the dropped-column case; confirm against the ColSpec
// constructor overloads.
ColSpec[] colSpecs = new ColSpec[] { // 'frozen<udt>' / live
new ColSpec("frozen_udt_as_frozen_udt_live", makeUDT2("frozen_udt_as_frozen_udt_live", false), makeUDT2("frozen_udt_as_frozen_udt_live", false), false, false), // 'frozen<udt>' / live / as 'udt'
new ColSpec("frozen_udt_as_unfrozen_udt_live", makeUDT2("frozen_udt_as_unfrozen_udt_live", false), makeUDT2("frozen_udt_as_unfrozen_udt_live", true), false, true), // 'frozen<udt>' / dropped
new ColSpec("frozen_udt_as_frozen_udt_dropped", makeUDT2("frozen_udt_as_frozen_udt_dropped", true).freezeNestedMulticellTypes().freeze().expandUserTypes(), makeUDT2("frozen_udt_as_frozen_udt_dropped", false), makeUDT2("frozen_udt_as_frozen_udt_dropped", false), true, false), // 'frozen<udt>' / dropped / as 'udt'
new ColSpec("frozen_udt_as_unfrozen_udt_dropped", makeUDT2("frozen_udt_as_unfrozen_udt_dropped", true).freezeNestedMulticellTypes().freeze().expandUserTypes(), makeUDT2("frozen_udt_as_unfrozen_udt_dropped", true), makeUDT2("frozen_udt_as_unfrozen_udt_dropped", false), true, true), // 'udt' / live
new ColSpec("unfrozen_udt_as_unfrozen_udt_live", makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true), makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true), false, false), // 'frozen<tuple>' as 'TupleType(multiCell=false' (there is nothing like 'FrozenType(TupleType(')
new ColSpec("frozen_tuple_as_frozen_tuple_live", makeTupleSimple(), makeTupleSimple(), false, false), // 'frozen<tuple>' as 'TupleType(multiCell=false' (there is nothing like 'FrozenType(TupleType(')
new ColSpec("frozen_tuple_as_frozen_tuple_dropped", makeTupleSimple(), makeTupleSimple(), true, false) };
Arrays.stream(colSpecs).forEach(c -> cols.addRegularColumn(c.name, // use the initial column type for the serialization header.
c.preFix));
Map<String, ColSpec> colSpecMap = Arrays.stream(colSpecs).collect(Collectors.toMap(c -> c.name, c -> c));
// Build an sstable whose header uses the preFix types, while the table schema
// carries the "correct" (post-fix) types.
File sstable = buildFakeSSTable(dir, 1, cols, c -> {
ColSpec cs = colSpecMap.get(c.name.toString());
if (cs == null)
return c;
// update the column type in the schema to the "correct" one.
return c.withNewType(cs.schema);
});
// Drop the columns marked as dropped, recording the drop in the table metadata.
Arrays.stream(colSpecs).filter(c -> c.dropped).forEach(c -> {
ColumnMetadata cd = getColDef(c.name);
tableMetadata = tableMetadata.unbuild().removeRegularOrStaticColumn(cd.name).recordColumnDrop(cd, FBUtilities.timestampMicros()).build();
});
// Sanity-check the header before the fix: every column still carries its preFix type.
SerializationHeader.Component header = readHeader(sstable);
for (ColSpec colSpec : colSpecs) {
AbstractType<?> hdrType = header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
assertEquals(colSpec.name, colSpec.preFix, hdrType);
assertEquals(colSpec.name, colSpec.preFix.isMultiCell(), hdrType.isMultiCell());
}
SSTableHeaderFix headerFix = builder().withPath(sstable.toPath()).build();
headerFix.execute();
assertFalse(headerFix.hasError());
assertTrue(headerFix.hasChanges());
// Verify that all columns to fix are in the updatedColumns set (paranoid, yet)
Arrays.stream(colSpecs).filter(c -> c.mustFix).forEach(c -> assertTrue("expect " + c.name + " to be updated, but was not (" + updatedColumns + ")", updatedColumns.contains(c.name)));
// Verify that the number of updated columns matches the expected number of columns to fix
assertEquals(Arrays.stream(colSpecs).filter(c -> c.mustFix).count(), updatedColumns.size());
// Re-read the header after the fix: every column must now carry its expected type.
header = readHeader(sstable);
for (ColSpec colSpec : colSpecs) {
AbstractType<?> hdrType = header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
assertEquals(colSpec.name, colSpec.expect, hdrType);
assertEquals(colSpec.name, colSpec.expect.isMultiCell(), hdrType.isMultiCell());
}
}
Example usage of org.apache.cassandra.db.marshal.AbstractType in the Eiger project (by wlloyd):
the collectReducedColumns method of the class SliceQueryFilter.
/**
 * Collects reduced columns into {@code container}, stopping once {@code count} live
 * columns have been gathered or a column falls past the slice's {@code finish} bound.
 *
 * @param container      destination container; also supplies the comparator and
 *                       tombstone information
 * @param reducedColumns source iterator of already-reduced columns
 * @param gcBefore       gc grace threshold used to decide column relevance
 */
public void collectReducedColumns(IColumnContainer container, Iterator<IColumn> reducedColumns, int gcBefore) {
    int liveColumns = 0;
    AbstractType comparator = container.getComparator();
    while (reducedColumns.hasNext()) {
        if (liveColumns >= count)
            break;
        IColumn column = reducedColumns.next();
        if (logger.isDebugEnabled())
            logger.debug(String.format("collecting %s of %s: %s", liveColumns, count, column.getString(comparator)));
        // Stop when the column is past the slice's finish bound. BUG FIX: the
        // finish.remaining() > 0 guard (an empty finish means "no bound") must cover
        // BOTH directions; the previous parenthesization OR'ed the reversed comparison
        // outside the guard, so it ran even with an empty finish buffer.
        if (finish.remaining() > 0
            && ((!reversed && comparator.compare(column.name(), finish) > 0)
                || (reversed && comparator.compare(column.name(), finish) < 0)))
            break;
        // only count live columns towards the `count` criteria
        if (column.isLive() && (!container.isMarkedForDelete() || column.mostRecentLiveChangeAt() > container.getMarkedForDeleteAt())) {
            liveColumns++;
        }
        // but we need to add all non-gc-able columns to the result for read repair:
        if (QueryFilter.isRelevant(column, container, gcBefore))
            container.addColumn(column);
    }
}
Example usage of org.apache.cassandra.db.marshal.AbstractType in the Eiger project (by wlloyd):
the validateFilterClauses method of the class ThriftValidation.
/**
 * Validates every expression in a filter clause against the column family's schema.
 *
 * @param metadata     column family metadata identifying keyspace and CF
 * @param index_clause expressions to validate; may be null or empty
 * @return true if at least one expression targets an indexed column with operator EQ
 * @throws InvalidRequestException when a column name or value fails validation
 */
public static boolean validateFilterClauses(CFMetaData metadata, List<IndexExpression> index_clause) throws InvalidRequestException {
    // Nothing to validate when there is no filter at all.
    if (isEmpty(index_clause))
        return false;

    Set<ByteBuffer> indexedColumns = Table.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.getIndexedColumns();
    AbstractType nameValidator = ColumnFamily.getComparatorFor(metadata.ksName, metadata.cfName, null);

    boolean hasIndexedEqClause = false;
    for (IndexExpression expression : index_clause) {
        // Column names must be valid per the CF's comparator.
        try {
            nameValidator.validate(expression.column_name);
        } catch (MarshalException me) {
            throw new InvalidRequestException(String.format("[%s]=[%s] failed name validation (%s)", ByteBufferUtil.bytesToHex(expression.column_name), ByteBufferUtil.bytesToHex(expression.value), me.getMessage()));
        }
        // Values must be valid per the validator declared for that column.
        AbstractType valueValidator = Schema.instance.getValueValidator(metadata.ksName, metadata.cfName, expression.column_name);
        try {
            valueValidator.validate(expression.value);
        } catch (MarshalException me) {
            throw new InvalidRequestException(String.format("[%s]=[%s] failed value validation (%s)", ByteBufferUtil.bytesToHex(expression.column_name), ByteBufferUtil.bytesToHex(expression.value), me.getMessage()));
        }
        if (expression.op == IndexOperator.EQ && indexedColumns.contains(expression.column_name))
            hasIndexedEqClause = true;
    }
    return hasIndexedEqClause;
}
Aggregations