Use of org.apache.cassandra.cql3.statements.BatchStatement.Type in the Apache Cassandra project: class Dump, method dump.
// Dumps every record from one or more full-query-log Chronicle queues to stdout.
// arguments: directory paths of the queues to read (duplicates are de-duplicated below);
// rollCycle: name of a RollCycles enum constant used when opening each queue;
// follow: when true, keep polling for newly appended records instead of exiting at end-of-queue.
public static void dump(List<String> arguments, String rollCycle, boolean follow) {
StringBuilder sb = new StringBuilder();
// Decodes one log record. The wire format is positional: the reads below MUST stay in
// this exact order (version, type, query start time, protocol version, query options,
// generated timestamp, generated nowInSeconds, then the type-specific payload).
ReadMarshallable reader = wireIn -> {
// Reuse the builder across records to avoid reallocating per entry.
sb.setLength(0);
int version = wireIn.read(BinLog.VERSION).int16();
// Reject records written by a newer Cassandra than this reader understands.
if (version > FullQueryLogger.CURRENT_VERSION) {
throw new IORuntimeException("Unsupported record version [" + version + "] - highest supported version is [" + FullQueryLogger.CURRENT_VERSION + ']');
}
String type = wireIn.read(BinLog.TYPE).text();
// Only single-query and batch records are known; anything else is unreadable.
if (!FullQueryLogger.SINGLE_QUERY.equals((type)) && !FullQueryLogger.BATCH.equals((type))) {
throw new IORuntimeException("Unsupported record type field [" + type + "] - supported record types are [" + FullQueryLogger.SINGLE_QUERY + ", " + FullQueryLogger.BATCH + ']');
}
sb.append("Type: ").append(type).append(System.lineSeparator());
long queryStartTime = wireIn.read(FullQueryLogger.QUERY_START_TIME).int64();
sb.append("Query start time: ").append(queryStartTime).append(System.lineSeparator());
int protocolVersion = wireIn.read(FullQueryLogger.PROTOCOL_VERSION).int32();
sb.append("Protocol version: ").append(protocolVersion).append(System.lineSeparator());
// Query options were serialized with the native-protocol codec; decode with the
// same protocol version that was recorded alongside them.
QueryOptions options = QueryOptions.codec.decode(Unpooled.wrappedBuffer(wireIn.read(FullQueryLogger.QUERY_OPTIONS).bytes()), ProtocolVersion.decode(protocolVersion, true));
long generatedTimestamp = wireIn.read(FullQueryLogger.GENERATED_TIMESTAMP).int64();
sb.append("Generated timestamp:").append(generatedTimestamp).append(System.lineSeparator());
int generatedNowInSeconds = wireIn.read(FullQueryLogger.GENERATED_NOW_IN_SECONDS).int32();
sb.append("Generated nowInSeconds:").append(generatedNowInSeconds).append(System.lineSeparator());
// Dispatch on record type for the remaining, type-specific payload.
switch(type) {
case (FullQueryLogger.SINGLE_QUERY):
dumpQuery(options, wireIn, sb);
break;
case (FullQueryLogger.BATCH):
dumpBatch(options, wireIn, sb);
break;
default:
// Unreachable given the validation above, but kept as a safety net.
throw new IORuntimeException("Log entry of unsupported type " + type);
}
System.out.print(sb.toString());
System.out.flush();
};
// Backoff strategy for spinning on the queue, not aggressive at all as this doesn't need to be low latency
Pauser pauser = Pauser.millis(100);
// Open each distinct queue path read-only with the requested roll cycle.
List<ChronicleQueue> queues = arguments.stream().distinct().map(path -> SingleChronicleQueueBuilder.single(new File(path)).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build()).collect(Collectors.toList());
List<ExcerptTailer> tailers = queues.stream().map(ChronicleQueue::createTailer).collect(Collectors.toList());
boolean hadWork = true;
// Drain every tailer; terminate once a full pass finds no records (unless following).
while (hadWork) {
hadWork = false;
for (ExcerptTailer tailer : tailers) {
while (tailer.readDocument(reader)) {
hadWork = true;
}
}
if (follow) {
if (!hadWork) {
// Chronicle queue doesn't support blocking so use this backoff strategy
pauser.pause();
}
// Don't terminate the loop even if there wasn't work
hadWork = true;
}
}
}
Use of org.apache.cassandra.cql3.statements.BatchStatement.Type in the Apache Cassandra project: class CassandraGenerators, method partitionKeyDataGen.
/**
 * Builds a generator that produces serialized partition keys for the given table.
 * A single-column key serializes the lone column's value directly; a composite key
 * serializes each component and combines them via {@link CompositeType#build}.
 */
public static Gen<ByteBuffer> partitionKeyDataGen(TableMetadata metadata) {
    ImmutableList<ColumnMetadata> columns = metadata.partitionKeyColumns();
    assert !columns.isEmpty() : "Unable to find partition key columns";
    if (columns.size() == 1)
        return getTypeSupport(columns.get(0).type).bytesGen();
    // Composite partition key: one byte-buffer generator per component column.
    List<Gen<ByteBuffer>> componentGens = new ArrayList<>(columns.size());
    for (ColumnMetadata column : columns)
        componentGens.add(getTypeSupport(column.type).bytesGen());
    return randomness -> {
        int count = componentGens.size();
        ByteBuffer[] components = new ByteBuffer[count];
        for (int idx = 0; idx < count; idx++)
            components[idx] = componentGens.get(idx).generate(randomness);
        return CompositeType.build(ByteBufferAccessor.instance, components);
    };
}
Use of org.apache.cassandra.cql3.statements.BatchStatement.Type in the Apache Cassandra project: class CassandraGenerators, method createColumnDefinition.
/**
 * Randomly generates a single column definition of the requested kind.
 * Primary-key kinds exclude EmptyType (not supported there) and are frozen;
 * clustering columns may additionally be wrapped in a reversed type.
 */
private static ColumnMetadata createColumnDefinition(String ks, String table, ColumnMetadata.Kind kind, Set<String> createdColumnNames, /* This is mutated to check for collisions, so has a side effect outside of normal random generation */
                                                     RandomnessSource rnd) {
    Gen<AbstractType<?>> typeGen = AbstractTypeGenerators.typeGen();
    boolean primaryKeyComponent = kind == ColumnMetadata.Kind.PARTITION_KEY || kind == ColumnMetadata.Kind.CLUSTERING;
    if (primaryKeyComponent) {
        // Empty type is not supported in primary keys, so filter it out; freeze the rest.
        typeGen = Generators.filter(typeGen, t -> t != EmptyType.instance).map(AbstractType::freeze);
    }
    if (kind == ColumnMetadata.Kind.CLUSTERING) {
        // Clustering columns periodically get reversed (DESC) types mixed in.
        typeGen = allowReversed(typeGen);
    }
    // Keep drawing identifiers until one that has not been used yet is produced;
    // Set.add doubles as the collision check.
    String generated = IDENTIFIER_GEN.generate(rnd);
    while (!createdColumnNames.add(generated))
        generated = IDENTIFIER_GEN.generate(rnd);
    ColumnIdentifier name = new ColumnIdentifier(generated, true);
    // Only primary-key columns carry a position; others use the sentinel -1.
    int position = kind.isPrimaryKeyKind() ? (int) rnd.next(Constraint.between(0, 30)) : -1;
    return new ColumnMetadata(ks, table, name, typeGen.generate(rnd), position, kind);
}
Use of org.apache.cassandra.cql3.statements.BatchStatement.Type in the Apache Cassandra project: class SSTableHeaderFixTest, method variousDroppedUserTypes.
// Verifies that SSTableHeaderFix repairs serialization-header column types for a mix of
// live and dropped UDT/tuple columns: each ColSpec records the type written into the
// fake sstable header (preFix), the type expected after the fix (expect), whether the
// column is dropped, and whether the fixer must rewrite it (mustFix).
@Test
public void variousDroppedUserTypes() throws Exception {
File dir = temporaryFolder;
TableMetadata.Builder cols = TableMetadata.builder("ks", "cf").addPartitionKeyColumn("pk", udtPK).addClusteringColumn("ck", udtCK);
ColSpec[] colSpecs = new ColSpec[] { // 'frozen<udt>' / live
new ColSpec("frozen_udt_as_frozen_udt_live", makeUDT2("frozen_udt_as_frozen_udt_live", false), makeUDT2("frozen_udt_as_frozen_udt_live", false), false, false), // 'frozen<udt>' / live / as 'udt'
new ColSpec("frozen_udt_as_unfrozen_udt_live", makeUDT2("frozen_udt_as_unfrozen_udt_live", false), makeUDT2("frozen_udt_as_unfrozen_udt_live", true), false, true), // 'frozen<udt>' / dropped
new ColSpec("frozen_udt_as_frozen_udt_dropped", makeUDT2("frozen_udt_as_frozen_udt_dropped", true).freezeNestedMulticellTypes().freeze().expandUserTypes(), makeUDT2("frozen_udt_as_frozen_udt_dropped", false), makeUDT2("frozen_udt_as_frozen_udt_dropped", false), true, false), // 'frozen<udt>' / dropped / as 'udt'
new ColSpec("frozen_udt_as_unfrozen_udt_dropped", makeUDT2("frozen_udt_as_unfrozen_udt_dropped", true).freezeNestedMulticellTypes().freeze().expandUserTypes(), makeUDT2("frozen_udt_as_unfrozen_udt_dropped", true), makeUDT2("frozen_udt_as_unfrozen_udt_dropped", false), true, true), // 'udt' / live
new ColSpec("unfrozen_udt_as_unfrozen_udt_live", makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true), makeUDT2("unfrozen_udt_as_unfrozen_udt_live", true), false, false), // 'frozen<tuple>' as 'TupleType(multiCell=false' (there is nothing like 'FrozenType(TupleType(')
new ColSpec("frozen_tuple_as_frozen_tuple_live", makeTupleSimple(), makeTupleSimple(), false, false), // 'frozen<tuple>' as 'TupleType(multiCell=false' (there is nothing like 'FrozenType(TupleType(')
new ColSpec("frozen_tuple_as_frozen_tuple_dropped", makeTupleSimple(), makeTupleSimple(), true, false) };
Arrays.stream(colSpecs).forEach(c -> cols.addRegularColumn(c.name, // use the initial column type for the serialization header.
c.preFix));
Map<String, ColSpec> colSpecMap = Arrays.stream(colSpecs).collect(Collectors.toMap(c -> c.name, c -> c));
// Build the fake sstable, then swap each column's schema type in for the header type.
File sstable = buildFakeSSTable(dir, 1, cols, c -> {
ColSpec cs = colSpecMap.get(c.name.toString());
if (cs == null)
return c;
// update the column type in the schema to the "correct" one.
return c.withNewType(cs.schema);
});
// Drop the columns flagged as dropped, recording the drop in the table metadata.
Arrays.stream(colSpecs).filter(c -> c.dropped).forEach(c -> {
ColumnMetadata cd = getColDef(c.name);
tableMetadata = tableMetadata.unbuild().removeRegularOrStaticColumn(cd.name).recordColumnDrop(cd, FBUtilities.timestampMicros()).build();
});
// Pre-condition: the header still holds the original (preFix) types before the fix runs.
SerializationHeader.Component header = readHeader(sstable);
for (ColSpec colSpec : colSpecs) {
AbstractType<?> hdrType = header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
assertEquals(colSpec.name, colSpec.preFix, hdrType);
assertEquals(colSpec.name, colSpec.preFix.isMultiCell(), hdrType.isMultiCell());
}
SSTableHeaderFix headerFix = builder().withPath(sstable.toPath()).build();
headerFix.execute();
assertFalse(headerFix.hasError());
assertTrue(headerFix.hasChanges());
// Verify that all columns to fix are in the updatedColumns set (paranoid, yet)
Arrays.stream(colSpecs).filter(c -> c.mustFix).forEach(c -> assertTrue("expect " + c.name + " to be updated, but was not (" + updatedColumns + ")", updatedColumns.contains(c.name)));
// Verify that the number of updated columns matches the expected number of columns to fix
assertEquals(Arrays.stream(colSpecs).filter(c -> c.mustFix).count(), updatedColumns.size());
// Post-condition: re-read the header and confirm every column now has its expected type.
header = readHeader(sstable);
for (ColSpec colSpec : colSpecs) {
AbstractType<?> hdrType = header.getRegularColumns().get(ByteBufferUtil.bytes(colSpec.name));
assertEquals(colSpec.name, colSpec.expect, hdrType);
assertEquals(colSpec.name, colSpec.expect.isMultiCell(), hdrType.isMultiCell());
}
}
Use of org.apache.cassandra.cql3.statements.BatchStatement.Type in the stargate-core project (tuplejump): class RowIndexSupport, method addFields.
/**
 * Translates a single Cassandra cell into index fields and appends them to {@code fields}.
 * Object-mapped columns expand a streamed JSON document; collection cells delegate to
 * {@link #collectionFields}; scalar columns produce one indexed field plus, when the
 * options declare doc values, an additional doc-value field.
 */
protected void addFields(Cell column, String name, ColumnDefinition columnDefinition, List<Field> fields) {
    if (options.isObject(name)) {
        // Object column: decode the cell as UTF-8 JSON and expand its nested fields.
        String json = UTF8Type.instance.compose(column.value());
        JsonDocument document = new StreamingJsonDocument(json, options.primary, name);
        fields.addAll(document.getFields());
        return;
    }
    if (column.name().isCollectionCell()) {
        // Collection element: per-element field extraction is handled separately.
        fields.addAll(collectionFields((CollectionType) columnDefinition.type, name, column));
        return;
    }
    // Plain scalar column.
    FieldType indexedType = options.fieldTypes.get(name);
    Type mappedType = options.types.get(name);
    addField(mappedType, columnDefinition, name, indexedType, column.value(), fields);
    if (options.containsDocValues()) {
        FieldType docValuesType = options.fieldDocValueTypes.get(name);
        if (docValuesType != null) {
            fields.add(Fields.docValueField(name, columnDefinition.type, column.value(), docValuesType));
        }
    }
}
Aggregations