
Example 36 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project cassandra by apache.

the class Dump method dump.

public static void dump(List<String> arguments, String rollCycle, boolean follow) {
    StringBuilder sb = new StringBuilder();
    ReadMarshallable reader = wireIn -> {
        sb.setLength(0);
        int version = wireIn.read(BinLog.VERSION).int16();
        if (version > FullQueryLogger.CURRENT_VERSION) {
            throw new IORuntimeException("Unsupported record version [" + version + "] - highest supported version is [" + FullQueryLogger.CURRENT_VERSION + ']');
        }
        String type = wireIn.read(BinLog.TYPE).text();
        if (!FullQueryLogger.SINGLE_QUERY.equals(type) && !FullQueryLogger.BATCH.equals(type)) {
            throw new IORuntimeException("Unsupported record type field [" + type + "] - supported record types are [" + FullQueryLogger.SINGLE_QUERY + ", " + FullQueryLogger.BATCH + ']');
        }
        sb.append("Type: ").append(type).append(System.lineSeparator());
        long queryStartTime = wireIn.read(FullQueryLogger.QUERY_START_TIME).int64();
        sb.append("Query start time: ").append(queryStartTime).append(System.lineSeparator());
        int protocolVersion = wireIn.read(FullQueryLogger.PROTOCOL_VERSION).int32();
        sb.append("Protocol version: ").append(protocolVersion).append(System.lineSeparator());
        QueryOptions options = QueryOptions.codec.decode(Unpooled.wrappedBuffer(wireIn.read(FullQueryLogger.QUERY_OPTIONS).bytes()), ProtocolVersion.decode(protocolVersion, true));
        long generatedTimestamp = wireIn.read(FullQueryLogger.GENERATED_TIMESTAMP).int64();
        sb.append("Generated timestamp:").append(generatedTimestamp).append(System.lineSeparator());
        int generatedNowInSeconds = wireIn.read(FullQueryLogger.GENERATED_NOW_IN_SECONDS).int32();
        sb.append("Generated nowInSeconds:").append(generatedNowInSeconds).append(System.lineSeparator());
        switch(type) {
            case (FullQueryLogger.SINGLE_QUERY):
                dumpQuery(options, wireIn, sb);
                break;
            case (FullQueryLogger.BATCH):
                dumpBatch(options, wireIn, sb);
                break;
            default:
                throw new IORuntimeException("Log entry of unsupported type " + type);
        }
        System.out.print(sb.toString());
        System.out.flush();
    };
    // Backoff strategy for spinning on the queue, not aggressive at all as this doesn't need to be low latency
    Pauser pauser = Pauser.millis(100);
    List<ChronicleQueue> queues = arguments.stream().distinct().map(path -> SingleChronicleQueueBuilder.single(new File(path)).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build()).collect(Collectors.toList());
    List<ExcerptTailer> tailers = queues.stream().map(ChronicleQueue::createTailer).collect(Collectors.toList());
    boolean hadWork = true;
    while (hadWork) {
        hadWork = false;
        for (ExcerptTailer tailer : tailers) {
            while (tailer.readDocument(reader)) {
                hadWork = true;
            }
        }
        if (follow) {
            if (!hadWork) {
                // Chronicle queue doesn't support blocking so use this backoff strategy
                pauser.pause();
            }
            // Don't terminate the loop even if there wasn't work
            hadWork = true;
        }
    }
}
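
For orientation, here is a minimal sketch of how this method might be driven. The DumpRunner class and the two paths are hypothetical, and "HOURLY" is simply one of the net.openhft.chronicle.queue.RollCycles constants that the rollCycle argument is parsed against.

// Hypothetical driver (not part of the Cassandra source): read two FQL
// directories once and exit; passing follow=true would tail them instead.
import java.util.Arrays;
import java.util.List;

public class DumpRunner {
    public static void main(String[] args) {
        List<String> paths = Arrays.asList("/var/log/cassandra/fql", "/backup/fql");
        // rollCycle must name a RollCycles constant, e.g. "HOURLY"
        Dump.dump(paths, "HOURLY", false);
    }
}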
Also used : IORuntimeException(net.openhft.chronicle.core.io.IORuntimeException) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) Unpooled(io.netty.buffer.Unpooled) Bytes(net.openhft.chronicle.bytes.Bytes) Option(io.airlift.airline.Option) ReadMarshallable(net.openhft.chronicle.wire.ReadMarshallable) ProtocolVersion(org.apache.cassandra.transport.ProtocolVersion) Pauser(net.openhft.chronicle.threads.Pauser) ExcerptTailer(net.openhft.chronicle.queue.ExcerptTailer) BinLog(org.apache.cassandra.utils.binlog.BinLog) ChronicleQueue(net.openhft.chronicle.queue.ChronicleQueue) Collectors(java.util.stream.Collectors) File(java.io.File) BufferUnderflowException(java.nio.BufferUnderflowException) WireIn(net.openhft.chronicle.wire.WireIn) Command(io.airlift.airline.Command) SingleChronicleQueueBuilder(net.openhft.chronicle.queue.impl.single.SingleChronicleQueueBuilder) List(java.util.List) ValueIn(net.openhft.chronicle.wire.ValueIn) FullQueryLogger(org.apache.cassandra.fql.FullQueryLogger) RollCycles(net.openhft.chronicle.queue.RollCycles) Arguments(io.airlift.airline.Arguments) Collections(java.util.Collections) QueryOptions(org.apache.cassandra.cql3.QueryOptions)

Example 37 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project cassandra by apache.

the class CassandraGenerators method partitionKeyDataGen.

public static Gen<ByteBuffer> partitionKeyDataGen(TableMetadata metadata) {
    ImmutableList<ColumnMetadata> columns = metadata.partitionKeyColumns();
    assert !columns.isEmpty() : "Unable to find partition key columns";
    if (columns.size() == 1)
        return getTypeSupport(columns.get(0).type).bytesGen();
    List<Gen<ByteBuffer>> columnGens = new ArrayList<>(columns.size());
    for (ColumnMetadata cm : columns) columnGens.add(getTypeSupport(cm.type).bytesGen());
    return rnd -> {
        ByteBuffer[] buffers = new ByteBuffer[columnGens.size()];
        for (int i = 0; i < columnGens.size(); i++) buffers[i] = columnGens.get(i).generate(rnd);
        return CompositeType.build(ByteBufferAccessor.instance, buffers);
    };
}
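
As a rough usage sketch (assuming a TableMetadata named metadata built elsewhere), the resulting Gen plugs straight into a QuickTheories property:

// Sketch only: 'metadata' is an assumed TableMetadata; the property just
// checks that every generated partition key serializes to a non-empty buffer.
import static org.quicktheories.QuickTheory.qt;

qt().forAll(CassandraGenerators.partitionKeyDataGen(metadata))
    .check(bb -> bb.hasRemaining());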
Also used : LocalPartitioner(org.apache.cassandra.dht.LocalPartitioner) SourceDSL(org.quicktheories.generators.SourceDSL) AbstractType(org.apache.cassandra.db.marshal.AbstractType) ByteBuffer(java.nio.ByteBuffer) ByteOrderedPartitioner(org.apache.cassandra.dht.ByteOrderedPartitioner) InetAddress(java.net.InetAddress) ConnectionType(org.apache.cassandra.net.ConnectionType) Gen(org.quicktheories.core.Gen) Matcher(java.util.regex.Matcher) TimeUUIDType(org.apache.cassandra.db.marshal.TimeUUIDType) Murmur3Partitioner(org.apache.cassandra.dht.Murmur3Partitioner) NoPayload(org.apache.cassandra.net.NoPayload) PingRequest(org.apache.cassandra.net.PingRequest) SinglePartitionReadCommand(org.apache.cassandra.db.SinglePartitionReadCommand) CompositeType(org.apache.cassandra.db.marshal.CompositeType) TableParams(org.apache.cassandra.schema.TableParams) MultilineRecursiveToStringStyle(org.apache.commons.lang3.builder.MultilineRecursiveToStringStyle) AbstractTypeGenerators.allowReversed(org.apache.cassandra.utils.AbstractTypeGenerators.allowReversed) Generate(org.quicktheories.generators.Generate) Set(java.util.Set) Verb(org.apache.cassandra.net.Verb) List(java.util.List) Stream(java.util.stream.Stream) SMALL_TIME_SPAN_NANOS(org.apache.cassandra.utils.Generators.SMALL_TIME_SPAN_NANOS) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) RandomnessSource(org.quicktheories.core.RandomnessSource) Modifier(java.lang.reflect.Modifier) TableMetadata(org.apache.cassandra.schema.TableMetadata) Pattern(java.util.regex.Pattern) SchemaCQLHelper(org.apache.cassandra.db.SchemaCQLHelper) InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) TableId(org.apache.cassandra.schema.TableId) FieldIdentifier(org.apache.cassandra.cql3.FieldIdentifier) ReadCommand(org.apache.cassandra.db.ReadCommand) Message(org.apache.cassandra.net.Message) IDENTIFIER_GEN(org.apache.cassandra.utils.Generators.IDENTIFIER_GEN) ArrayList(java.util.ArrayList) RandomPartitioner(org.apache.cassandra.dht.RandomPartitioner) HashSet(java.util.HashSet) Token(org.apache.cassandra.dht.Token) ImmutableList(com.google.common.collect.ImmutableList) ByteBufferAccessor(org.apache.cassandra.db.marshal.ByteBufferAccessor) EmptyType(org.apache.cassandra.db.marshal.EmptyType) OrderPreservingPartitioner(org.apache.cassandra.dht.OrderPreservingPartitioner) ReflectionToStringBuilder(org.apache.commons.lang3.builder.ReflectionToStringBuilder) AbstractTypeGenerators.getTypeSupport(org.apache.cassandra.utils.AbstractTypeGenerators.getTypeSupport) TIMESTAMP_NANOS(org.apache.cassandra.utils.Generators.TIMESTAMP_NANOS) Slices(org.apache.cassandra.db.Slices) Constraint(org.quicktheories.impl.Constraint) IPartitioner(org.apache.cassandra.dht.IPartitioner) TINY_TIME_SPAN_NANOS(org.apache.cassandra.utils.Generators.TINY_TIME_SPAN_NANOS)

Example 38 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project cassandra by apache.

the class CassandraGenerators method createColumnDefinition.

private static ColumnMetadata createColumnDefinition(String ks, String table, ColumnMetadata.Kind kind,
        /* createdColumnNames is mutated to check for collisions, so it has a side effect outside of normal random generation */
        Set<String> createdColumnNames, RandomnessSource rnd) {
    Gen<AbstractType<?>> typeGen = AbstractTypeGenerators.typeGen();
    switch(kind) {
        // primary key types must be frozen; the empty type is also not supported, so filter it out
        case PARTITION_KEY:
        case CLUSTERING:
            typeGen = Generators.filter(typeGen, t -> t != EmptyType.instance).map(AbstractType::freeze);
            break;
    }
    if (kind == ColumnMetadata.Kind.CLUSTERING) {
        // when working on a clustering column, add in reversed types periodically
        typeGen = allowReversed(typeGen);
    }
    // filter for unique names
    String str;
    while (!createdColumnNames.add(str = IDENTIFIER_GEN.generate(rnd))) {
    }
    ColumnIdentifier name = new ColumnIdentifier(str, true);
    int position = !kind.isPrimaryKeyKind() ? -1 : (int) rnd.next(Constraint.between(0, 30));
    return new ColumnMetadata(ks, table, name, typeGen.generate(rnd), position, kind);
}
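
A hedged sketch of a possible call site, assuming a RandomnessSource named rnd is in scope; the shared createdColumnNames set is what keeps the generated names distinct across calls:

// Hypothetical caller: generate three regular columns with distinct names
// for one table, reusing the same collision-tracking set each time.
Set<String> usedNames = new HashSet<>();
List<ColumnMetadata> generated = new ArrayList<>();
for (int i = 0; i < 3; i++)
    generated.add(createColumnDefinition("ks", "tbl", ColumnMetadata.Kind.REGULAR, usedNames, rnd));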
Also used : ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) AbstractType(org.apache.cassandra.db.marshal.AbstractType) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) Constraint(org.quicktheories.impl.Constraint)

Example 39 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project cassandra by apache.

the class SSTableHeaderFixTest method variousDroppedUserTypes.

@Test
public void variousDroppedUserTypes() throws Exception {
    File dir = temporaryFolder;
    TableMetadata.Builder cols = TableMetadata.builder("ks", "cf").addPartitionKeyColumn("pk", udtPK).addClusteringColumn("ck", udtCK);
    ColSpec[] colSpecs = new ColSpec[] {
        // 'frozen<udt>' / live
        new ColSpec("frozen_udt_as_frozen_udt_live", makeUDT2("frozen_udt_as_frozen_udt_live", false), makeUDT2("frozen_udt_as_frozen_udt_live", false), false, false),
        // 'frozen<udt>' / live / as 'udt'
        new ColSpec("frozen_udt_as_unfrozen_udt_live", makeUDT2("frozen_udt_as_unfrozen_udt_live", false), makeUDT2("frozen_udt_as_unfrozen_udt_live", true), false, true),
        // 'frozen<udt>' / dropped
        new ColSpec("frozen_udt_as_frozen_udt_dropped", makeUDT2("frozen_udt_as_frozen_udt_dropped", true).freezeNestedMulticellTypes().freeze().expandUserTypes(), makeUDT2("frozen_udt_as_frozen_udt_dropped", false), makeUDT2("frozen_udt_as_frozen_udt_dropped", false), true, false),
        // 'frozen<udt>' / dropped / as 'udt'
        new ColSpec("frozen_udt_as_unfrozen_udt_dropped", makeUDT2("frozen_udt_as_unfrozen_udt_dropped", true).freezeNestedMulticellTypes().freeze().expandUserTypes(), makeUDT2("frozen_udt_as_unfrozen_udt_dropped", true), makeUDT2("frozen_udt_as_unfrozen_udt_dropped", false), true, true),
        // 'udt' / live
        new ColSpec("unfrozen_udt_as_unfrozen_udt_live", makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true), makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true), false, false),
        // 'frozen<tuple>' / live, stored as 'TupleType(multiCell=false' (there is nothing like 'FrozenType(TupleType(')
        new ColSpec("frozen_tuple_as_frozen_tuple_live", makeTupleSimple(), makeTupleSimple(), false, false),
        // 'frozen<tuple>' / dropped, stored as 'TupleType(multiCell=false'
        new ColSpec("frozen_tuple_as_frozen_tuple_dropped", makeTupleSimple(), makeTupleSimple(), true, false)
    };
    // use the initial column type for the serialization header
    Arrays.stream(colSpecs).forEach(c -> cols.addRegularColumn(c.name, c.preFix));
    Map<String, ColSpec> colSpecMap = Arrays.stream(colSpecs).collect(Collectors.toMap(c -> c.name, c -> c));
    File sstable = buildFakeSSTable(dir, 1, cols, c -> {
        ColSpec cs = colSpecMap.get(c.name.toString());
        if (cs == null)
            return c;
        // update the column type in the schema to the "correct" one.
        return c.withNewType(cs.schema);
    });
    Arrays.stream(colSpecs).filter(c -> c.dropped).forEach(c -> {
        ColumnMetadata cd = getColDef(c.name);
        tableMetadata = tableMetadata.unbuild().removeRegularOrStaticColumn(cd.name).recordColumnDrop(cd, FBUtilities.timestampMicros()).build();
    });
    SerializationHeader.Component header = readHeader(sstable);
    for (ColSpec colSpec : colSpecs) {
        AbstractType<?> hdrType = header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
        assertEquals(colSpec.name, colSpec.preFix, hdrType);
        assertEquals(colSpec.name, colSpec.preFix.isMultiCell(), hdrType.isMultiCell());
    }
    SSTableHeaderFix headerFix = builder().withPath(sstable.toPath()).build();
    headerFix.execute();
    assertFalse(headerFix.hasError());
    assertTrue(headerFix.hasChanges());
    // Verify that all columns to fix are in the updatedColumns set (paranoid, yet)
    Arrays.stream(colSpecs).filter(c -> c.mustFix).forEach(c -> assertTrue("expect " + c.name + " to be updated, but was not (" + updatedColumns + ")", updatedColumns.contains(c.name)));
    // Verify that the number of updated columns matches the expected number of columns to fix
    assertEquals(Arrays.stream(colSpecs).filter(c -> c.mustFix).count(), updatedColumns.size());
    header = readHeader(sstable);
    for (ColSpec colSpec : colSpecs) {
        AbstractType<?> hdrType = header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
        assertEquals(colSpec.name, colSpec.expect, hdrType);
        assertEquals(colSpec.name, colSpec.expect.isMultiCell(), hdrType.isMultiCell());
    }
}
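
The test leans on a small ColSpec holder defined elsewhere in SSTableHeaderFixTest. Inferred from its usage above (and not verified against the actual source), it presumably looks roughly like this:

// Inferred sketch of the ColSpec holder; field roles follow the assertions above.
static class ColSpec {
    final String name;
    final AbstractType<?> schema;  // type recorded in the table schema (expanded for dropped columns)
    final AbstractType<?> preFix;  // type written into the sstable serialization header
    final AbstractType<?> expect;  // type the header should contain after SSTableHeaderFix runs
    final boolean dropped;         // column is dropped from the schema in the test
    final boolean mustFix;         // header fixer is expected to rewrite this column

    ColSpec(String name, AbstractType<?> preFix, AbstractType<?> expect, boolean dropped, boolean mustFix) {
        this(name, preFix, preFix, expect, dropped, mustFix);
    }

    ColSpec(String name, AbstractType<?> schema, AbstractType<?> preFix, AbstractType<?> expect, boolean dropped, boolean mustFix) {
        this.name = name;
        this.schema = schema;
        this.preFix = preFix;
        this.expect = expect;
        this.dropped = dropped;
        this.mustFix = mustFix;
    }
}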
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) Arrays(java.util.Arrays) IndexTarget(org.apache.cassandra.cql3.statements.schema.IndexTarget) File(org.apache.cassandra.io.util.File) AbstractType(org.apache.cassandra.db.marshal.AbstractType) ByteBuffer(java.nio.ByteBuffer) Matcher(java.util.regex.Matcher) Map(java.util.Map) After(org.junit.After) SSTableFormat(org.apache.cassandra.io.sstable.format.SSTableFormat) Assert.fail(org.junit.Assert.fail) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) CollectionType(org.apache.cassandra.db.marshal.CollectionType) CompositeType(org.apache.cassandra.db.marshal.CompositeType) SequentialWriter(org.apache.cassandra.io.util.SequentialWriter) Version(org.apache.cassandra.io.sstable.format.Version) FBUtilities(org.apache.cassandra.utils.FBUtilities) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) Set(java.util.Set) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) List(java.util.List) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) FloatType(org.apache.cassandra.db.marshal.FloatType) FileUtils(org.apache.cassandra.io.util.FileUtils) Assert.assertFalse(org.junit.Assert.assertFalse) FrozenType(org.apache.cassandra.db.marshal.FrozenType) Pattern(java.util.regex.Pattern) IntStream(java.util.stream.IntStream) ColumnMetadata(org.apache.cassandra.schema.ColumnMetadata) SetType(org.apache.cassandra.db.marshal.SetType) FieldIdentifier(org.apache.cassandra.cql3.FieldIdentifier) Function(java.util.function.Function) HashSet(java.util.HashSet) Int32Type(org.apache.cassandra.db.marshal.Int32Type) ListType(org.apache.cassandra.db.marshal.ListType) UTF8Type(org.apache.cassandra.db.marshal.UTF8Type) AbstractCompositeType(org.apache.cassandra.db.marshal.AbstractCompositeType) TupleType(org.apache.cassandra.db.marshal.TupleType) BigFormat(org.apache.cassandra.io.sstable.format.big.BigFormat) SerializationHeader(org.apache.cassandra.db.SerializationHeader) Before(org.junit.Before) IndexMetadata(org.apache.cassandra.schema.IndexMetadata) ByteBufferUtil(org.apache.cassandra.utils.ByteBufferUtil) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) MapType(org.apache.cassandra.db.marshal.MapType) EncodingStats(org.apache.cassandra.db.rows.EncodingStats) Assert(org.junit.Assert) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) UserType(org.apache.cassandra.db.marshal.UserType)

Example 40 with Type

use of org.apache.cassandra.cql3.statements.schema.IndexTarget.Type in project stargate-core by tuplejump.

the class RowIndexSupport method addFields.

protected void addFields(Cell column, String name, ColumnDefinition columnDefinition, List<Field> fields) {
    boolean isObject = options.isObject(name);
    if (isObject) {
        String value = UTF8Type.instance.compose(column.value());
        JsonDocument document = new StreamingJsonDocument(value, options.primary, name);
        fields.addAll(document.getFields());
    } else if (column.name().isCollectionCell()) {
        List<Field> fieldsForField = collectionFields((CollectionType) columnDefinition.type, name, column);
        fields.addAll(fieldsForField);
    } else {
        FieldType fieldType = options.fieldTypes.get(name);
        Type type = options.types.get(name);
        addField(type, columnDefinition, name, fieldType, column.value(), fields);
        if (options.containsDocValues()) {
            FieldType docValueType = options.fieldDocValueTypes.get(name);
            if (docValueType != null) {
                Field docValueField = Fields.docValueField(name, columnDefinition.type, column.value(), docValueType);
                fields.add(docValueField);
            }
        }
    }
}
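
As a rough sketch of the call pattern (hypothetical; addFields is protected, so this would sit in RowIndexSupport itself or a subclass), the collected fields typically end up on a Lucene Document:

// Hypothetical call site: turn one cell into Lucene fields and index them.
// 'cell', 'name', and 'columnDefinition' are assumed to be in scope.
List<Field> fields = new FastList<>();
addFields(cell, name, columnDefinition, fields);
org.apache.lucene.document.Document doc = new org.apache.lucene.document.Document();
for (Field f : fields)
    doc.add(f);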
Also used : Field(org.apache.lucene.document.Field) FieldType(org.apache.lucene.document.FieldType) CQL3Type(org.apache.cassandra.cql3.CQL3Type) CType(org.apache.cassandra.db.composites.CType) StreamingJsonDocument(com.tuplejump.stargate.lucene.json.StreamingJsonDocument) FastList(javolution.util.FastList) JsonDocument(com.tuplejump.stargate.lucene.json.JsonDocument)

Aggregations

CQL3Type (org.apache.cassandra.cql3.CQL3Type): 14
ByteBuffer (java.nio.ByteBuffer): 12
Test (org.junit.Test): 12
AbstractType (org.apache.cassandra.db.marshal.AbstractType): 11
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 10
ArrayList (java.util.ArrayList): 9
List (java.util.List): 8
ClientState (org.apache.cassandra.service.ClientState): 8
ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier): 7
ProtocolVersion (org.apache.cassandra.transport.ProtocolVersion): 7
FunctionName (org.apache.cassandra.cql3.functions.FunctionName): 6
InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException): 6
java.util (java.util): 4
Collections (java.util.Collections): 4
ChronicleQueue (net.openhft.chronicle.queue.ChronicleQueue): 4
ExcerptTailer (net.openhft.chronicle.queue.ExcerptTailer): 4
RollCycles (net.openhft.chronicle.queue.RollCycles): 4
QueryOptions (org.apache.cassandra.cql3.QueryOptions): 4
TableMetadata (org.apache.cassandra.schema.TableMetadata): 4
Set (java.util.Set): 3