Example 16 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project cassandra by apache.

the class TupleTypesRepresentationTest method verifyTypes.

@Test
public void verifyTypes() {
    AssertionError master = null;
    for (TypeDef typeDef : allTypes) {
        try {
            assertEquals(typeDef.toString() + "\n typeString vs type\n", typeDef.typeString, typeDef.type.toString());
            assertEquals(typeDef.toString() + "\n typeString vs cqlType.getType()\n", typeDef.typeString, typeDef.cqlType.getType().toString());
            AbstractType<?> expanded = typeDef.type.expandUserTypes();
            CQL3Type expandedCQL = expanded.asCQL3Type();
            // Note: an assertion on the raw parsed CQL3Type.toString() output is intentionally
            // omitted here, because the parsed CQL3Type instance for 'frozen<list<tuple<text, text>>>'
            // renders as 'frozen<list<frozen<tuple<text, text>>>>' via its CQL3Type.toString()
            // implementation.
            assertEquals(typeDef.toString() + "\n droppedCqlType\n", typeDef.droppedCqlType, expandedCQL);
            assertEquals(typeDef.toString() + "\n droppedCqlTypeString\n", typeDef.droppedCqlTypeString, expandedCQL.toString());
            assertEquals(typeDef.toString() + "\n multiCell\n", typeDef.type.isMultiCell(), typeDef.droppedType.isMultiCell());
            AbstractType<?> parsedType = TypeParser.parse(typeDef.typeString);
            assertEquals(typeDef.toString(), typeDef.typeString, parsedType.toString());
        } catch (AssertionError ae) {
            if (master == null)
                master = ae;
            else
                master.addSuppressed(ae);
        }
    }
    if (master != null)
        throw master;
}
Also used : CQL3Type(org.apache.cassandra.cql3.CQL3Type) Test(org.junit.Test)
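
The round-trip this test relies on can be exercised in isolation. Below is a minimal sketch, assuming a Cassandra classpath; the type string is an illustrative value, not one taken from the test's TypeDef table:

import org.apache.cassandra.cql3.CQL3Type;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.TypeParser;

public class TypeRoundTripSketch {
    public static void main(String[] args) {
        // Internal type string as it appears in schema tables (illustrative value).
        String typeString = "org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.UTF8Type)";
        // TypeParser.parse reconstructs the AbstractType from its string form...
        AbstractType<?> parsed = TypeParser.parse(typeString);
        // ...and toString() is expected to reproduce the exact input, as the test asserts.
        assert typeString.equals(parsed.toString());
        // asCQL3Type() renders the user-facing CQL spelling, here list<text>.
        CQL3Type cql = parsed.asCQL3Type();
        System.out.println(cql);
    }
}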

Example 17 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project cassandra by apache.

the class FQLQueryReader method readMarshallable.

public void readMarshallable(WireIn wireIn) throws IORuntimeException {
    verifyVersion(wireIn);
    String type = readType(wireIn);
    long queryStartTime = wireIn.read(QUERY_START_TIME).int64();
    int protocolVersion = wireIn.read(PROTOCOL_VERSION).int32();
    QueryOptions queryOptions = QueryOptions.codec.decode(Unpooled.wrappedBuffer(wireIn.read(QUERY_OPTIONS).bytes()), ProtocolVersion.decode(protocolVersion, true));
    long generatedTimestamp = wireIn.read(GENERATED_TIMESTAMP).int64();
    int generatedNowInSeconds = wireIn.read(GENERATED_NOW_IN_SECONDS).int32();
    String keyspace = wireIn.read(KEYSPACE).text();
    // Dispatch on the record type tag written by the full query logger.
    switch (type) {
        case SINGLE_QUERY:
            String queryString = wireIn.read(QUERY).text();
            query = new FQLQuery.Single(keyspace, protocolVersion, queryOptions, queryStartTime, generatedTimestamp, generatedNowInSeconds, queryString, queryOptions.getValues());
            break;
        case BATCH:
            BatchStatement.Type batchType = BatchStatement.Type.valueOf(wireIn.read(BATCH_TYPE).text());
            ValueIn in = wireIn.read(QUERIES);
            int queryCount = in.int32();
            List<String> queries = new ArrayList<>(queryCount);
            for (int i = 0; i < queryCount; i++) queries.add(in.text());
            in = wireIn.read(VALUES);
            int valueCount = in.int32();
            List<List<ByteBuffer>> values = new ArrayList<>(valueCount);
            for (int ii = 0; ii < valueCount; ii++) {
                List<ByteBuffer> subValues = new ArrayList<>();
                values.add(subValues);
                int numSubValues = in.int32();
                for (int zz = 0; zz < numSubValues; zz++) subValues.add(ByteBuffer.wrap(in.bytes()));
            }
            query = new FQLQuery.Batch(keyspace, protocolVersion, queryOptions, queryStartTime, generatedTimestamp, generatedNowInSeconds, batchType, queries, values);
            break;
        default:
            throw new IORuntimeException("Unhandled record type: " + type);
    }
}
Also used : ArrayList(java.util.ArrayList) QueryOptions(org.apache.cassandra.cql3.QueryOptions) ByteBuffer(java.nio.ByteBuffer) ValueIn(net.openhft.chronicle.wire.ValueIn) IORuntimeException(net.openhft.chronicle.core.io.IORuntimeException) BatchStatement(com.datastax.driver.core.BatchStatement) ArrayList(java.util.ArrayList) List(java.util.List)
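
To see the reader in context: fqltool drains a Chronicle queue and hands each record to readMarshallable. A minimal sketch follows, assuming FQLQueryReader implements ReadMarshallable and exposes a getQuery() accessor as in the fqltool sources, and a Chronicle Queue version providing ChronicleQueue.singleBuilder; the queue path is illustrative:

import net.openhft.chronicle.queue.ChronicleQueue;
import net.openhft.chronicle.queue.ExcerptTailer;

public class FQLDrainSketch {
    public static void main(String[] args) {
        // Directory full query logging was configured to write to (illustrative path).
        try (ChronicleQueue queue = ChronicleQueue.singleBuilder("/var/log/fql").build()) {
            ExcerptTailer tailer = queue.createTailer();
            FQLQueryReader reader = new FQLQueryReader();
            // readDocument hands each record's WireIn to reader.readMarshallable above.
            while (tailer.readDocument(reader))
                System.out.println(reader.getQuery());
        }
    }
}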

Example 18 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project stargate-core by tuplejump.

the class RowIndexSupport method addKeyField.

private void addKeyField(long timestamp, Map.Entry<String, ColumnDefinition> entry, ByteBuffer value, IndexEntryBuilder builder) {
    String keyColumnName = entry.getValue().name.toString();
    builder.setCurrentTimestamp(timestamp);
    List<Field> fields = builder.getFieldList();
    FieldType fieldType = options.fieldTypes.get(keyColumnName);
    Type type = options.types.get(keyColumnName);
    addField(type, entry.getValue(), keyColumnName, fieldType, value, fields);
    if (options.containsDocValues()) {
        FieldType docValueType = options.fieldDocValueTypes.get(keyColumnName);
        if (docValueType != null) {
            Field docValueField = Fields.docValueField(keyColumnName, entry.getValue().type, value, docValueType);
            fields.add(docValueField);
        }
    }
}
Also used : Field(org.apache.lucene.document.Field) FieldType(org.apache.lucene.document.FieldType) CQL3Type(org.apache.cassandra.cql3.CQL3Type) CType(org.apache.cassandra.db.composites.CType) FieldType(org.apache.lucene.document.FieldType)
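
The docValueField helper above is project-specific, but the underlying Lucene pattern is standard: store a doc-values representation of a column alongside its indexed field. A minimal sketch using plain Lucene APIs, with illustrative field name and value:

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.util.BytesRef;

public class DocValuesSketch {
    public static void main(String[] args) {
        List<Field> fields = new ArrayList<>();
        // Inverted-index representation used for lookups.
        fields.add(new StringField("user_name", "alice", Field.Store.NO));
        // Columnar doc-values representation used for sorting and faceting.
        fields.add(new SortedDocValuesField("user_name", new BytesRef("alice")));
        fields.forEach(f -> System.out.println(f.name()));
    }
}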

Example 19 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project stargate-core by tuplejump.

the class CassandraUtils method getOptions.

public static Options getOptions(Properties mapping, ColumnFamilyStore baseCfs, String colName) {
    Map<String, NumericConfig> numericFieldOptions = new HashMap<>();
    Map<String, FieldType> fieldDocValueTypes = new TreeMap<>();
    Map<String, FieldType> collectionFieldDocValueTypes = new TreeMap<>();
    Map<String, FieldType> fieldTypes = new TreeMap<>();
    Map<String, FieldType[]> collectionFieldTypes = new TreeMap<>();
    Map<String, ColumnDefinition> validators = new TreeMap<>();
    Map<String, ColumnDefinition> clusteringKeysIndexed = new LinkedHashMap<>();
    Map<String, ColumnDefinition> partitionKeysIndexed = new LinkedHashMap<>();
    Set<String> indexedColumnNames;
    // Gather the options for all the mapped fields.
    indexedColumnNames = new TreeSet<>();
    indexedColumnNames.addAll(mapping.getFields().keySet());
    Set<String> added = new HashSet<>(indexedColumnNames.size());
    List<ColumnDefinition> partitionKeys = baseCfs.metadata.partitionKeyColumns();
    List<ColumnDefinition> clusteringKeys = baseCfs.metadata.clusteringColumns();
    for (ColumnDefinition colDef : partitionKeys) {
        String columnName = colDef.name.toString();
        if (Options.logger.isDebugEnabled()) {
            Options.logger.debug("Partition key name is {} and index is {}", colName, colDef.position());
        }
        validators.put(columnName, colDef);
        if (indexedColumnNames.contains(columnName)) {
            partitionKeysIndexed.put(columnName, colDef);
            addPropertiesAndFieldType(mapping, numericFieldOptions, fieldDocValueTypes, collectionFieldDocValueTypes, fieldTypes, collectionFieldTypes, added, colDef, columnName);
        }
    }
    for (ColumnDefinition colDef : clusteringKeys) {
        String columnName = colDef.name.toString();
        if (Options.logger.isDebugEnabled()) {
            Options.logger.debug("Clustering key name is {} and index is {}", colName, colDef.position() + 1);
        }
        validators.put(columnName, colDef);
        if (indexedColumnNames.contains(columnName)) {
            clusteringKeysIndexed.put(columnName, colDef);
            addPropertiesAndFieldType(mapping, numericFieldOptions, fieldDocValueTypes, collectionFieldDocValueTypes, fieldTypes, collectionFieldTypes, added, colDef, columnName);
        }
    }
    for (String columnName : indexedColumnNames) {
        if (added.add(columnName.toLowerCase())) {
            Properties options = mapping.getFields().get(columnName);
            ColumnDefinition colDef = getColumnDefinition(baseCfs, columnName);
            if (colDef != null) {
                validators.put(columnName, colDef);
                addFieldType(columnName, colDef.type, options, numericFieldOptions, fieldDocValueTypes, collectionFieldDocValueTypes, fieldTypes, collectionFieldTypes);
            } else {
                throw new IllegalArgumentException(String.format("Column Definition for %s not found", columnName));
            }
            if (options.getType() == Type.object) {
                mapping.getFields().putAll(options.getFields());
            }
        }
    }
    Set<ColumnDefinition> otherColumns = baseCfs.metadata.regularColumns();
    for (ColumnDefinition colDef : otherColumns) {
        String columnName = UTF8Type.instance.getString(colDef.name.bytes);
        validators.put(columnName, colDef);
    }
    numericFieldOptions.putAll(mapping.getDynamicNumericConfig());
    Analyzer defaultAnalyzer = mapping.getLuceneAnalyzer();
    Analyzer analyzer = new PerFieldAnalyzerWrapper(defaultAnalyzer, mapping.perFieldAnalyzers());
    Map<String, Type> types = new TreeMap<>();
    Set<String> nestedFields = new TreeSet<>();
    for (Map.Entry<String, ColumnDefinition> entry : validators.entrySet()) {
        CQL3Type cql3Type = entry.getValue().type.asCQL3Type();
        AbstractType<?> inner = getValueValidator(cql3Type.getType());
        if (cql3Type.isCollection()) {
            types.put(entry.getKey(), fromAbstractType(inner.asCQL3Type()));
            nestedFields.add(entry.getKey());
        } else {
            types.put(entry.getKey(), fromAbstractType(cql3Type));
        }
    }
    return new Options(mapping, numericFieldOptions, fieldDocValueTypes, collectionFieldDocValueTypes, fieldTypes, collectionFieldTypes, types, nestedFields, clusteringKeysIndexed, partitionKeysIndexed, indexedColumnNames, analyzer, colName);
}
Also used : CQL3Type(org.apache.cassandra.cql3.CQL3Type) Options(com.tuplejump.stargate.lucene.Options) Properties(com.tuplejump.stargate.lucene.Properties) Analyzer(org.apache.lucene.analysis.Analyzer) FieldType(org.apache.lucene.document.FieldType) ColumnDefinition(org.apache.cassandra.config.ColumnDefinition) PerFieldAnalyzerWrapper(org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper) Type(com.tuplejump.stargate.lucene.Type) FieldType(org.apache.lucene.document.FieldType) CQL3Type(org.apache.cassandra.cql3.CQL3Type) NumericConfig(org.apache.lucene.queryparser.flexible.standard.config.NumericConfig)
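
The analyzer wiring near the end of getOptions is worth isolating: PerFieldAnalyzerWrapper routes named fields to dedicated analyzers and everything else to a default. A minimal sketch with illustrative field names:

import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

public class AnalyzerWiringSketch {
    public static void main(String[] args) {
        Map<String, Analyzer> perField = new HashMap<>();
        // Keep identifier-like columns as single tokens; all other fields
        // fall through to the default analyzer.
        perField.put("user_id", new KeywordAnalyzer());
        Analyzer analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), perField);
        System.out.println(analyzer);
    }
}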

Example 20 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project cassandra by apache.

the class CreateViewStatement method announceMigration.

public Event.SchemaChange announceMigration(QueryState queryState, boolean isLocalOnly) throws RequestValidationException {
    // We need to make sure that:
    //  - primary key includes all columns in base table's primary key
    //  - make sure that the select statement does not have anything other than columns
    //    and their names match the base table's names
    //  - make sure that primary key does not include any collections
    //  - make sure there is no where clause in the select statement
    //  - make sure there is not currently a table or view
    //  - make sure baseTable gcGraceSeconds > 0
    properties.validate();
    if (properties.useCompactStorage)
        throw new InvalidRequestException("Cannot use 'COMPACT STORAGE' when defining a materialized view");
    // We enforce the keyspace because if the RF is different, the logic to wait for a
    // specific replica would break
    if (!baseName.getKeyspace().equals(keyspace()))
        throw new InvalidRequestException("Cannot create a materialized view on a table in a separate keyspace");
    TableMetadata metadata = Schema.instance.validateTable(baseName.getKeyspace(), baseName.getColumnFamily());
    if (metadata.isCounter())
        throw new InvalidRequestException("Materialized views are not supported on counter tables");
    if (metadata.isView())
        throw new InvalidRequestException("Materialized views cannot be created against other materialized views");
    if (metadata.params.gcGraceSeconds == 0) {
        throw new InvalidRequestException(String.format("Cannot create materialized view '%s' for base table " + "'%s' with gc_grace_seconds of 0, since this value is " + "used to TTL undelivered updates. Setting gc_grace_seconds" + " too low might cause undelivered updates to expire " + "before being replayed.", cfName.getColumnFamily(), baseName.getColumnFamily()));
    }
    Set<ColumnIdentifier> included = Sets.newHashSetWithExpectedSize(selectClause.size());
    for (RawSelector selector : selectClause) {
        Selectable.Raw selectable = selector.selectable;
        if (selectable instanceof Selectable.WithFieldSelection.Raw)
            throw new InvalidRequestException("Cannot select out a part of type when defining a materialized view");
        if (selectable instanceof Selectable.WithFunction.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selectable instanceof Selectable.WritetimeOrTTL.Raw)
            throw new InvalidRequestException("Cannot use function when defining a materialized view");
        if (selector.alias != null)
            throw new InvalidRequestException("Cannot use alias when defining a materialized view");
        Selectable s = selectable.prepare(metadata);
        if (s instanceof Term.Raw)
            throw new InvalidRequestException("Cannot use terms in selection when defining a materialized view");
        ColumnMetadata cdef = (ColumnMetadata) s;
        included.add(cdef.name);
    }
    Set<ColumnMetadata.Raw> targetPrimaryKeys = new HashSet<>();
    for (ColumnMetadata.Raw identifier : Iterables.concat(partitionKeys, clusteringKeys)) {
        if (!targetPrimaryKeys.add(identifier))
            throw new InvalidRequestException("Duplicate entry found in PRIMARY KEY: " + identifier);
        ColumnMetadata cdef = identifier.prepare(metadata);
        if (cdef.type.isMultiCell())
            throw new InvalidRequestException(String.format("Cannot use MultiCell column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.isStatic())
            throw new InvalidRequestException(String.format("Cannot use Static column '%s' in PRIMARY KEY of materialized view", identifier));
        if (cdef.type instanceof DurationType)
            throw new InvalidRequestException(String.format("Cannot use Duration column '%s' in PRIMARY KEY of materialized view", identifier));
    }
    // build the select statement
    Map<ColumnMetadata.Raw, Boolean> orderings = Collections.emptyMap();
    List<ColumnMetadata.Raw> groups = Collections.emptyList();
    SelectStatement.Parameters parameters = new SelectStatement.Parameters(orderings, groups, false, true, false);
    SelectStatement.RawStatement rawSelect = new SelectStatement.RawStatement(baseName, parameters, selectClause, whereClause, null, null);
    ClientState state = ClientState.forInternalCalls();
    state.setKeyspace(keyspace());
    rawSelect.prepareKeyspace(state);
    rawSelect.setBoundVariables(getBoundVariables());
    ParsedStatement.Prepared prepared = rawSelect.prepare(true);
    SelectStatement select = (SelectStatement) prepared.statement;
    StatementRestrictions restrictions = select.getRestrictions();
    if (!prepared.boundNames.isEmpty())
        throw new InvalidRequestException("Cannot use query parameters in CREATE MATERIALIZED VIEW statements");
    String whereClauseText = View.relationsToWhereClause(whereClause.relations);
    Set<ColumnIdentifier> basePrimaryKeyCols = new HashSet<>();
    for (ColumnMetadata definition : Iterables.concat(metadata.partitionKeyColumns(), metadata.clusteringColumns())) basePrimaryKeyCols.add(definition.name);
    List<ColumnIdentifier> targetClusteringColumns = new ArrayList<>();
    List<ColumnIdentifier> targetPartitionKeys = new ArrayList<>();
    // This is only used as an intermediate state; this is to catch whether multiple non-PK columns are used
    boolean hasNonPKColumn = false;
    for (ColumnMetadata.Raw raw : partitionKeys) hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetPartitionKeys, restrictions);
    for (ColumnMetadata.Raw raw : clusteringKeys) hasNonPKColumn |= getColumnIdentifier(metadata, basePrimaryKeyCols, hasNonPKColumn, raw, targetClusteringColumns, restrictions);
    // We need to include all of the primary key columns from the base table in order to make sure that we do not
    // overwrite values in the view. We cannot support "collapsing" the base table into a smaller number of rows in
    // the view because if we need to generate a tombstone, we have no way of knowing which value is currently being
    // used in the view and whether or not to generate a tombstone. In order to not surprise our users, we require
    // that they include all of the columns. We provide them with a list of all of the columns left to include.
    boolean missingClusteringColumns = false;
    StringBuilder columnNames = new StringBuilder();
    List<ColumnIdentifier> includedColumns = new ArrayList<>();
    for (ColumnMetadata def : metadata.columns()) {
        ColumnIdentifier identifier = def.name;
        boolean includeDef = included.isEmpty() || included.contains(identifier);
        if (includeDef && def.isStatic()) {
            throw new InvalidRequestException(String.format("Unable to include static column '%s' which would be included by Materialized View SELECT * statement", identifier));
        }
        boolean defInTargetPrimaryKey = targetClusteringColumns.contains(identifier) || targetPartitionKeys.contains(identifier);
        if (includeDef && !defInTargetPrimaryKey) {
            includedColumns.add(identifier);
        }
        if (!def.isPrimaryKeyColumn())
            continue;
        if (!defInTargetPrimaryKey) {
            if (missingClusteringColumns)
                columnNames.append(',');
            else
                missingClusteringColumns = true;
            columnNames.append(identifier);
        }
    }
    if (missingClusteringColumns)
        throw new InvalidRequestException(String.format("Cannot create Materialized View %s without primary key columns from base %s (%s)", columnFamily(), baseName.getColumnFamily(), columnNames.toString()));
    if (targetPartitionKeys.isEmpty())
        throw new InvalidRequestException("Must select at least a column for a Materialized View");
    if (targetClusteringColumns.isEmpty())
        throw new InvalidRequestException("No columns are defined for Materialized View other than primary key");
    TableParams params = properties.properties.asNewTableParams();
    if (params.defaultTimeToLive > 0) {
        throw new InvalidRequestException("Cannot set default_time_to_live for a materialized view. " + "Data in a materialized view always expire at the same time than " + "the corresponding data in the parent table.");
    }
    TableMetadata.Builder builder = TableMetadata.builder(keyspace(), columnFamily(), properties.properties.getId()).isView(true).params(params);
    add(metadata, targetPartitionKeys, builder::addPartitionKeyColumn);
    add(metadata, targetClusteringColumns, builder::addClusteringColumn);
    add(metadata, includedColumns, builder::addRegularColumn);
    ViewMetadata definition = new ViewMetadata(keyspace(), columnFamily(), metadata.id, metadata.name, included.isEmpty(), rawSelect, whereClauseText, builder.build());
    try {
        MigrationManager.announceNewView(definition, isLocalOnly);
        return new Event.SchemaChange(Event.SchemaChange.Change.CREATED, Event.SchemaChange.Target.TABLE, keyspace(), columnFamily());
    } catch (AlreadyExistsException e) {
        if (ifNotExists)
            return null;
        throw e;
    }
}
Also used : ClientState(org.apache.cassandra.service.ClientState) RawSelector(org.apache.cassandra.cql3.selection.RawSelector) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) Selectable(org.apache.cassandra.cql3.selection.Selectable) InvalidRequestException(org.apache.cassandra.exceptions.InvalidRequestException) StatementRestrictions(org.apache.cassandra.cql3.restrictions.StatementRestrictions) ViewMetadata(org.apache.cassandra.schema.ViewMetadata) TableMetadata(org.apache.cassandra.schema.TableMetadata) DurationType(org.apache.cassandra.db.marshal.DurationType) TableParams(org.apache.cassandra.schema.TableParams) AlreadyExistsException(org.apache.cassandra.exceptions.AlreadyExistsException)
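
The validation rules above translate directly into constraints on the CQL a client may submit. A minimal driver-side sketch, assuming a base table users(id, name, email) with primary key (id) in keyspace ks (all names illustrative): the view selects whole columns only, restricts every view primary-key column with IS NOT NULL, and carries the full base primary key into its own key.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;

public class CreateViewSketch {
    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect("ks")) {
            // Same keyspace as the base table, no functions or aliases in the
            // selection, every view primary-key column restricted by IS NOT NULL,
            // and the base primary-key column 'id' present in the view key.
            session.execute("CREATE MATERIALIZED VIEW IF NOT EXISTS users_by_email AS " +
                            "SELECT id, name, email FROM users " +
                            "WHERE email IS NOT NULL AND id IS NOT NULL " +
                            "PRIMARY KEY (email, id)");
        }
    }
}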

Aggregations

CQL3Type (org.apache.cassandra.cql3.CQL3Type): 14
ByteBuffer (java.nio.ByteBuffer): 12
Test (org.junit.Test): 12
AbstractType (org.apache.cassandra.db.marshal.AbstractType): 11
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 10
ArrayList (java.util.ArrayList): 9
List (java.util.List): 8
ClientState (org.apache.cassandra.service.ClientState): 8
ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier): 7
ProtocolVersion (org.apache.cassandra.transport.ProtocolVersion): 7
FunctionName (org.apache.cassandra.cql3.functions.FunctionName): 6
InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException): 6
java.util (java.util): 4
Collections (java.util.Collections): 4
ChronicleQueue (net.openhft.chronicle.queue.ChronicleQueue): 4
ExcerptTailer (net.openhft.chronicle.queue.ExcerptTailer): 4
RollCycles (net.openhft.chronicle.queue.RollCycles): 4
QueryOptions (org.apache.cassandra.cql3.QueryOptions): 4
TableMetadata (org.apache.cassandra.schema.TableMetadata): 4
Set (java.util.Set): 3