Example 6 with Query

use of com.datastax.oss.simulacron.common.request.Query in project pulsar-sink by datastax.

the class SimpleEndToEndSimulacronIT method fail_prepare_no_deletes.

@Test
void fail_prepare_no_deletes() {
    SimulacronUtils.primeTables(simulacron, schema);
    Query bad1 = makeQuery(32, "fail", 153000987000L);
    simulacron.prime(when(bad1).then(serverError("bad thing")).applyToPrepare());
    Map<String, Object> props = new HashMap<>(connectorProperties);
    props.put("topic.mytopic.ks1.table1.deletesEnabled", "false");
    Condition<Throwable> delete = new Condition<Throwable>("delete statement") {

        @Override
        public boolean matches(Throwable value) {
            return value.getMessage().contains(DELETE_STATEMENT);
        }
    };
    assertThatThrownBy(() -> task.open(props, sinkContext)).isInstanceOf(RuntimeException.class).hasMessageStartingWith("Prepare failed for statement: " + INSERT_STATEMENT).doesNotHave(delete);
}
Also used : Condition(org.assertj.core.api.Condition) Query(com.datastax.oss.simulacron.common.request.Query) HashMap(java.util.HashMap) Test(org.junit.jupiter.api.Test)
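
The makeQuery helper is not shown on this page. A minimal sketch of what it plausibly looks like, modeled on the inline Query constructed in the batch_requests example below; INSERT_STATEMENT, makeParams, and PARAM_TYPES are fixtures referenced by the test but not defined here, so their exact shapes are assumptions:

private static Query makeQuery(int a, String b, long timestamp) {
    // INSERT_STATEMENT is assumed to expand to something like:
    // "INSERT INTO ks1.table1(a,b) VALUES (:a,:b) USING TIMESTAMP :" + SinkUtil.TIMESTAMP_VARNAME
    return new Query(
        INSERT_STATEMENT,
        Collections.emptyList(),        // empty list, matching the other Query constructions on this page
        makeParams(a, b, timestamp),    // assumed helper: builds the named-parameter value map (a, b, timestamp)
        PARAM_TYPES);                   // assumed map of parameter names to CQL type names
}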

Example 7 with Query

use of com.datastax.oss.simulacron.common.request.Query in project pulsar-sink by datastax.

the class SimpleEndToEndSimulacronIT method batch_requests.

// 
// @ParameterizedTest
// @CsvSource({"All", "Driver"})
// void should_not_record_failure_offsets_for_driver_errors_if_ignore_errors_all_or_driver(
// String ignoreErrors) {
// SimulacronUtils.primeTables(simulacron, schema);
// 
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
// 
// Query bad1 = makeQuery(32, "fail", 153000987000L);
// simulacron.prime(when(bad1).then(serverError("bad thing")).delay(500,
// TimeUnit.MILLISECONDS));
// 
// Query good2 = makeQuery(22, "success", 153000987000L);
// simulacron.prime(when(good2).then(noRows()));
// 
// Query bad2 = makeQuery(12, "fail2", 153000987000L);
// simulacron.prime(when(bad2).then(serverError("bad thing")));
// 
// Query bad3 = makeQuery(2, "fail3", 153000987000L);
// simulacron.prime(when(bad3).then(serverError("bad thing")));
// 
// Map<String, String> connectorPropertiesIgnoreErrors =
// new ImmutableMap.Builder<String, String>()
// .putAll(connectorProperties)
// .put("ignoreErrors", ignoreErrors)
// .build();
// 
// conn.start(connectorPropertiesIgnoreErrors);
// 
// Record<GenericRecord> record1 = makeRecord(42, "the answer", 153000987L, 1234);
// Record<GenericRecord> record2 = makeRecord(32, "fail", 153000987L, 1235);
// Record<GenericRecord> record3 = makeRecord(22, "success", 153000987L, 1236);
// Record<GenericRecord> record4 = makeRecord(12, "fail2", 153000987L, 1237);
// 
// // Make a bad record in a different partition.
// Record<GenericRecord> record5 = makeRecord(1, 2, "fail3", 153000987L, 1238);
// runTaskWithRecords(record1, record2, record3, record4, record5);
// 
// // Verify that we get an error offset for the first record that failed in partition 0 (1235)
// // even though its failure was discovered after 1237. Also, 1238 belongs to a different
// // partition, so it should be included.
// Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
// task.preCommit(currentOffsets);
// assertThat(currentOffsets).isEmpty();
// 
// assertThat(logs.getAllMessagesAsString())
// .contains("Error inserting/updating row for Pulsar record
// Record<GenericRecord>{kafkaOffset=1237")
// .contains("Error inserting/updating row for Pulsar record
// Record<GenericRecord>{kafkaOffset=1238")
// .contains(
// "statement: INSERT INTO ks1.table1(a,b) VALUES (:a,:b) USING TIMESTAMP
// SinkUtil.TIMESTAMP_VARNAME");
// InstanceState instanceState = task.getInstanceState();
// assertThat(instanceState.getFailedRecordCounter("mytopic", "ks1.table1").getCount())
// .isEqualTo(3);
// assertThat(instanceState.getRecordCounter("mytopic", "ks1.table1").getCount()).isEqualTo(5);
// }
// 
// @Test
// void success_offset() {
// SimulacronUtils.primeTables(simulacron, schema);
// 
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
// 
// Query good2 = makeQuery(22, "success", 153000987000L);
// simulacron.prime(when(good2).then(noRows()));
// 
// conn.start(connectorProperties);
// 
// Record<GenericRecord> record1 = makeRecord(42, "the answer", 153000987L, 1234);
// Record<GenericRecord> record2 = makeRecord(22, "success", 153000987L, 1235);
// runTaskWithRecords(record1, record2);
// 
// Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
// task.preCommit(currentOffsets);
// assertThat(currentOffsets).isEmpty();
// 
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// assertThat(queryList.size()).isEqualTo(2);
// assertThat(queryList.get(0).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(1).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// }
// 
// /** Test for KAF-72 */
// @Test
// void should_record_counters_per_topic_ks_table() {
// SimulacronUtils.primeTables(simulacron, schema);
// 
// Query good1topic1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1topic1).then(noRows()));
// 
// Query good2topic1 = makeQuery(22, "success", 153000987000L);
// simulacron.prime(when(good2topic1).then(noRows()));
// 
// Query good1topic2 = makeTtlQuery(22, "success", 153000987000L, 22L);
// simulacron.prime(when(good1topic2).then(noRows()));
// 
// Query good2topic2 = makeTtlQuery(33, "success_2", 153000987000L, 33L);
// simulacron.prime(when(good2topic2).then(noRows()));
// 
// conn.start(connectorProperties);
// 
// Record<GenericRecord> record1topic1 = makeRecord(42, "the answer", 153000987L, 1234);
// Record<GenericRecord> record2topic1 = makeRecord(22, "success", 153000987L, 1235);
// Record<GenericRecord> record1topic2 = makeTtlRecord(22, "success", 153000987L, 1235);
// Record<GenericRecord> record2topic2 = makeTtlRecord(33, "success_2", 153000987L, 1235);
// 
// runTaskWithRecords(record1topic1, record2topic1, record1topic2, record2topic2);
// 
// Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
// task.preCommit(currentOffsets);
// assertThat(currentOffsets).isEmpty();
// 
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// assertThat(queryList.size()).isEqualTo(4);
// assertThat(queryList.get(0).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(1).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(2).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(3).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// 
// InstanceState instanceState = task.getInstanceState();
// assertThat(instanceState.getRecordCounter("mytopic", "ks1.table1").getCount()).isEqualTo(2);
// assertThat(instanceState.getRecordCounter("mytopic_with_ttl",
// "ks1.table1_with_ttl").getCount())
// .isEqualTo(2);
// }
// 
// @Test
// void consistency_level() {
// SimulacronUtils.primeTables(simulacron, schema);
// 
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
// 
// Query good2 =
// new Query(
// "INSERT INTO ks1.table2(a,b) VALUES (:a,:b) USING TIMESTAMP
// :SinkUtil.TIMESTAMP_VARNAME",
// Collections.emptyList(),
// makeParams(22, "success", 153000987000L),
// PARAM_TYPES);
// simulacron.prime(when(good2).then(noRows()));
// 
// conn.start(connectorProperties);
// 
// Record<GenericRecord> record1 = makeRecord(42, "the answer", 153000987L, 1234);
// 
// // Put the second record in "yourtopic", which has QUORUM CL.
// Record<GenericRecord> record2 =
// new Record<GenericRecord>(
// "yourtopic",
// 0,
// null,
// 22,
// null,
// "success",
// 1235L,
// 153000987L,
// TimestampType.CREATE_TIME);
// runTaskWithRecords(record1, record2);
// 
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// assertThat(queryList.size()).isEqualTo(2);
// 
// for (QueryLog queryLog : queryList) {
// if (queryLog.getQuery().contains("table1")) {
// assertThat(queryLog.getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// } else if (queryLog.getQuery().contains("table2")) {
// assertThat(queryLog.getConsistency()).isEqualTo(ConsistencyLevel.QUORUM);
// } else {
// fail("%s is not for table1 nor table2!", queryLog.toString());
// }
// }
// }
// 
// @Test
// void undefined_topic() {
// SimulacronUtils.primeTables(simulacron, schema);
// 
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
// 
// conn.start(connectorProperties);
// 
// Record<GenericRecord> goodRecord = makeRecord(42, "the answer", 153000987L, 1234);
// 
// Record<GenericRecord> badRecord = new Record<GenericRecord>("unknown", 0, null, 42L, null,
// 42, 1234L);
// runTaskWithRecords(goodRecord, badRecord);
// assertThat(logs.getAllMessagesAsString())
// .contains("Error decoding/mapping Kafka record Record<GenericRecord>{kafkaOffset=1234")
// .contains("Connector has no configuration for record topic 'unknown'. Please update the configuration and restart.");
// 
// // Verify that the insert for good1 was issued.
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// byte[] secondParam = new byte[10];
// ((Execute) queryList.get(0).getFrame().message)
// .options
// .positionalValues
// .get(1)
// .get(secondParam);
// assertThat(new String(secondParam, StandardCharsets.UTF_8)).isEqualTo("the answer");
// }
// 
@Test
void batch_requests() {
    // Insert 5 records: 2 from mytopic, 3 from yourtopic. Verify that they batch properly and
    // with the correct CLs.
    // Even though we will not be executing simple statements in this test, we must specify
    // that we will so that Simulacron handles preparing our statement properly.
    SimulacronUtils.primeTables(simulacron, schema);
    Query good1 = makeQuery(42, "the answer", 153000987000L);
    simulacron.prime(when(good1).then(noRows()));
    Query good2 = new Query("INSERT INTO ks1.table2(a,b) VALUES (:a,:b) USING TIMESTAMP :" + SinkUtil.TIMESTAMP_VARNAME, Collections.emptyList(), makeParams(42, "topic2 success1", 153000987000L), PARAM_TYPES);
    simulacron.prime(when(good2).then(noRows()));
    taskConfigs.add(connectorProperties);
    Record<GenericRecord> goodRecord1 = makeRecord(42, "the answer", 153000987L, 1234);
    Record<GenericRecord> goodRecord2 = makeRecord(42, "the second answer", 153000987L, 1234);
    Record<GenericRecord> goodRecord3 = new PulsarRecordImpl("persistent://tenant/namespace/yourtopic", "42", new GenericRecordImpl().put("field1", "topic2 success1"), recordType, 153000987L);
    Record<GenericRecord> goodRecord4 = new PulsarRecordImpl("persistent://tenant/namespace/yourtopic", "42", new GenericRecordImpl().put("field1", "topic2 success2"), recordType, 153000987L);
    Record<GenericRecord> goodRecord5 = new PulsarRecordImpl("persistent://tenant/namespace/yourtopic", "42", new GenericRecordImpl().put("field1", "topic2 success3"), recordType, 153000987L);
    // The order of records shouldn't matter here, but we try to mix things up to emulate
    // a real workload.
    runTaskWithRecords(goodRecord1, goodRecord3, goodRecord2, goodRecord4, goodRecord5);
    // Verify that we issued two batch requests, one at LOCAL_ONE (for table1/mytopic) and
    // one at QUORUM (for table2/yourtopic). There's some pretty gnarly unwrapping of request
    // info. We distinguish one batch from the other based on the number of statements in the
    // batch.
    List<QueryLog> queryList = simulacron.node(0).getLogs().getQueryLogs().stream().filter(q -> q.getType().equals("BATCH")).collect(Collectors.toList());
    Map<ConsistencyLevel, Integer> queryInfo = queryList.stream().map(queryLog -> (Batch) queryLog.getFrame().message).collect(Collectors.toMap(message -> ConsistencyLevel.fromCode(message.consistency), message -> message.values.size()));
    assertThat(queryInfo).containsOnly(entry(ConsistencyLevel.LOCAL_ONE, 2), entry(ConsistencyLevel.QUORUM, 3));
    InstanceState instanceState = task.getInstanceState();
    // verify that there was one batch with 2 statements for mytopic
    verifyOneBatchWithNStatements(instanceState.getBatchSizeHistogramSummary("mytopic", "ks1.table1"), 2);
    // verify that there was one batch with 3 statements for yourtopic
    verifyOneBatchWithNStatements(instanceState.getBatchSizeHistogramSummary("yourtopic", "ks1.table2"), 3);
    // verify batchSizeInBytes updates for mytopic
    verifyBatchSizeInBytesUpdate(instanceState.getBatchSizeInBytesHistogramSummary("mytopic", "ks1.table1"), 2, false);
    // verify batchSizeInBytes updates for yourtopic
    verifyBatchSizeInBytesUpdate(instanceState.getBatchSizeInBytesHistogramSummary("yourtopic", "ks1.table2"), 3, true);
}
Also used : STDOUT(com.datastax.oss.dsbulk.tests.logging.StreamType.STDOUT) BoundCluster(com.datastax.oss.simulacron.server.BoundCluster) BeforeEach(org.junit.jupiter.api.BeforeEach) Batch(com.datastax.oss.protocol.internal.request.Batch) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) PrimeDsl.when(com.datastax.oss.simulacron.common.stubbing.PrimeDsl.when) SimulacronExtension(com.datastax.oss.dsbulk.tests.simulacron.SimulacronExtension) TestInstance(org.junit.jupiter.api.TestInstance) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) Map(java.util.Map) InstanceState(com.datastax.oss.common.sink.state.InstanceState) SinkUtil(com.datastax.oss.common.sink.util.SinkUtil) Record(org.apache.pulsar.functions.api.Record) LogInterceptor(com.datastax.oss.dsbulk.tests.logging.LogInterceptor) StreamInterceptor(com.datastax.oss.dsbulk.tests.logging.StreamInterceptor) Query(com.datastax.oss.simulacron.common.request.Query) MILLISECONDS(java.util.concurrent.TimeUnit.MILLISECONDS) InetSocketAddress(java.net.InetSocketAddress) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) Test(org.junit.jupiter.api.Test) StreamInterceptingExtension(com.datastax.oss.dsbulk.tests.logging.StreamInterceptingExtension) RecordSchemaBuilder(org.apache.pulsar.client.api.schema.RecordSchemaBuilder) DataTypes(com.datastax.oss.driver.api.core.type.DataTypes) List(java.util.List) GenericRecordImpl(com.datastax.oss.sink.pulsar.GenericRecordImpl) Column(com.datastax.oss.dsbulk.tests.simulacron.SimulacronUtils.Column) RecordCassandraSinkTask(com.datastax.oss.sink.pulsar.RecordCassandraSinkTask) STDERR(com.datastax.oss.dsbulk.tests.logging.StreamType.STDERR) Mockito.mock(org.mockito.Mockito.mock) SimulacronUtils(com.datastax.oss.dsbulk.tests.simulacron.SimulacronUtils) StreamCapture(com.datastax.oss.dsbulk.tests.logging.StreamCapture) PrimeDsl.serverError(com.datastax.oss.simulacron.common.stubbing.PrimeDsl.serverError) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) SchemaType(org.apache.pulsar.common.schema.SchemaType) ImmutableMap(com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap) ArrayList(java.util.ArrayList) Assertions.assertThatThrownBy(org.assertj.core.api.Assertions.assertThatThrownBy) QueryLog(com.datastax.oss.simulacron.common.cluster.QueryLog) Table(com.datastax.oss.dsbulk.tests.simulacron.SimulacronUtils.Table) CassandraSinkTask(com.datastax.oss.sink.pulsar.CassandraSinkTask) PrimeDsl.noRows(com.datastax.oss.simulacron.common.stubbing.PrimeDsl.noRows) Assertions.entry(org.assertj.core.api.Assertions.entry) LogCapture(com.datastax.oss.dsbulk.tests.logging.LogCapture) Schema(org.apache.pulsar.client.api.Schema) TimeUnit(java.util.concurrent.TimeUnit) GenericRecord(org.apache.pulsar.client.api.schema.GenericRecord) Mockito(org.mockito.Mockito) AfterEach(org.junit.jupiter.api.AfterEach) SinkContext(org.apache.pulsar.io.core.SinkContext) ConsistencyLevel(com.datastax.oss.simulacron.common.codec.ConsistencyLevel) SimulacronConfig(com.datastax.oss.dsbulk.tests.simulacron.annotations.SimulacronConfig) Condition(org.assertj.core.api.Condition) LifeCycleManager(com.datastax.oss.common.sink.state.LifeCycleManager) PulsarRecordImpl(com.datastax.oss.sink.pulsar.PulsarRecordImpl) LogInterceptingExtension(com.datastax.oss.dsbulk.tests.logging.LogInterceptingExtension) Collections(java.util.Collections) Query(com.datastax.oss.simulacron.common.request.Query) PulsarRecordImpl(com.datastax.oss.sink.pulsar.PulsarRecordImpl) 
QueryLog(com.datastax.oss.simulacron.common.cluster.QueryLog) ConsistencyLevel(com.datastax.oss.simulacron.common.codec.ConsistencyLevel) InstanceState(com.datastax.oss.common.sink.state.InstanceState) Batch(com.datastax.oss.protocol.internal.request.Batch) GenericRecordImpl(com.datastax.oss.sink.pulsar.GenericRecordImpl) GenericRecord(org.apache.pulsar.client.api.schema.GenericRecord) Test(org.junit.jupiter.api.Test)
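
For reference, the QueryLog-to-Batch unwrapping that the test compresses into one stream pipeline can be written out step by step. This restates only calls that appear in the test above, in expanded form:

List<QueryLog> batchLogs = simulacron.node(0).getLogs().getQueryLogs().stream()
    .filter(q -> q.getType().equals("BATCH"))
    .collect(Collectors.toList());
for (QueryLog log : batchLogs) {
    // Each QueryLog wraps the raw protocol frame; BATCH frames carry a Batch message.
    Batch batch = (Batch) log.getFrame().message;
    ConsistencyLevel cl = ConsistencyLevel.fromCode(batch.consistency);
    int statementCount = batch.values.size(); // one values list per statement in the batch
}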

Example 8 with Query

use of com.datastax.oss.simulacron.common.request.Query in project pulsar-sink by datastax.

the class SimpleEndToEndSimulacronIT method fail_delete.

@Test
void fail_delete() {
    SimulacronUtils.primeTables(simulacron, schema);
    Query bad1 = new Query("DELETE FROM ks1.mycounter WHERE a = :a AND b = :b", Collections.emptyList(), ImmutableMap.<String, Object>builder().put("a", 37).put("b", "delete").build(), ImmutableMap.<String, String>builder().put("a", "int").put("b", "varchar").build());
    simulacron.prime(when(bad1).then(serverError("bad thing")));
    Map<String, Object> connProps = new HashMap<>();
    connProps.put("name", INSTANCE_NAME);
    connProps.put("contactPoints", hostname);
    connProps.put("port", port);
    connProps.put("loadBalancing.localDc", "dc1");
    connProps.put("topic.mytopic.ks1.mycounter.mapping", "a=value.bigint, b=value.text, c=value.int");
    taskConfigs.add(connProps);
    RecordSchemaBuilder builder = org.apache.pulsar.client.api.schema.SchemaBuilder.record("MyBean");
    builder.field("bigint").type(SchemaType.INT64);
    builder.field("text").type(SchemaType.STRING);
    builder.field("int").type(SchemaType.INT32);
    Schema<GenericRecord> schema = org.apache.pulsar.client.api.Schema.generic(builder.build(SchemaType.AVRO));
    GenericRecord value = new GenericRecordImpl().put("bigint", 37L).put("text", "delete");
    Record<GenericRecord> record = new PulsarRecordImpl("persistent://tenant/namespace/mytopic", null, value, schema);
    runTaskWithRecords(record);
    // The log may need a little time to be updated with our error message.
    try {
        MILLISECONDS.sleep(500);
    } catch (InterruptedException e) {
    // swallow
    }
    assertThat(logs.getAllMessagesAsString()).contains("Error inserting/updating row for Pulsar record PulsarSinkRecord{PulsarRecordImpl{topic=persistent://tenant/namespace/mytopic, value=GenericRecordImpl{values={bigint=37, text=delete}}").contains("statement: DELETE FROM ks1.mycounter WHERE a = :a AND b = :b");
}
Also used : RecordSchemaBuilder(org.apache.pulsar.client.api.schema.RecordSchemaBuilder) Query(com.datastax.oss.simulacron.common.request.Query) HashMap(java.util.HashMap) PulsarRecordImpl(com.datastax.oss.sink.pulsar.PulsarRecordImpl) GenericRecordImpl(com.datastax.oss.sink.pulsar.GenericRecordImpl) GenericRecord(org.apache.pulsar.client.api.schema.GenericRecord) Test(org.junit.jupiter.api.Test)

Example 9 with Query

use of com.datastax.oss.simulacron.common.request.Query in project dsbulk by datastax.

the class SimulacronUtils method primeSystemPeersV2.

public static void primeSystemPeersV2(BoundCluster simulacron) {
    Query whenSelectSystemPeersV2 = new Query(SELECT_SYSTEM_PEERS_V2);
    ErrorResult thenThrowServerError = new ServerErrorResult("Unknown keyspace/cf pair (system.peers_v2)");
    RequestPrime primeSystemPeersV2 = new RequestPrime(whenSelectSystemPeersV2, thenThrowServerError);
    simulacron.prime(new Prime(primeSystemPeersV2));
}
Also used : Prime(com.datastax.oss.simulacron.common.stubbing.Prime) RequestPrime(com.datastax.oss.simulacron.common.cluster.RequestPrime) Query(com.datastax.oss.simulacron.common.request.Query) RequestPrime(com.datastax.oss.simulacron.common.cluster.RequestPrime) ErrorResult(com.datastax.oss.simulacron.common.result.ErrorResult) ServerErrorResult(com.datastax.oss.simulacron.common.result.ServerErrorResult) ServerErrorResult(com.datastax.oss.simulacron.common.result.ServerErrorResult)
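
The same four-step pattern (build a Query, build a result, wrap both in a RequestPrime, prime it) generalizes to any statement. A hedged sketch of a reusable helper built only from the constructors shown above; the helper itself is hypothetical, not part of SimulacronUtils:

static void primeServerError(BoundCluster simulacron, String cql, String message) {
    // Any subsequent execution of `cql` against the cluster will get a server error,
    // exactly as primeSystemPeersV2 does for the system.peers_v2 query.
    Query when = new Query(cql);
    ErrorResult then = new ServerErrorResult(message);
    simulacron.prime(new Prime(new RequestPrime(when, then)));
}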

Example 10 with Query

use of com.datastax.oss.simulacron.common.request.Query in project dsbulk by datastax.

the class SimulacronUtils method primeTables.

public static void primeTables(BoundCluster simulacron, Keyspace... keyspaces) {
    List<LinkedHashMap<String, Object>> allKeyspacesRows = new ArrayList<>();
    List<LinkedHashMap<String, Object>> allTablesRows = new ArrayList<>();
    List<LinkedHashMap<String, Object>> allColumnsRows = new ArrayList<>();
    for (Keyspace keyspace : keyspaces) {
        LinkedHashMap<String, Object> keyspaceRow = new LinkedHashMap<>();
        keyspaceRow.put("keyspace_name", keyspace.name);
        keyspaceRow.put("durable_writes", true);
        keyspaceRow.put("replication", ImmutableMap.of("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1"));
        allKeyspacesRows.add(keyspaceRow);
        Query whenSelectKeyspace = new Query(SELECT_KEYSPACES + " WHERE keyspace_name = '" + keyspace.name + '\'');
        SuccessResult thenReturnKeyspace = new SuccessResult(Collections.singletonList(keyspaceRow), new LinkedHashMap<>(KEYSPACE_COLUMNS));
        RequestPrime primeKeyspace = new RequestPrime(whenSelectKeyspace, thenReturnKeyspace);
        simulacron.prime(new Prime(primeKeyspace));
        for (Table table : keyspace.tables) {
            LinkedHashMap<String, Object> tableRow = new LinkedHashMap<>();
            tableRow.put("keyspace_name", keyspace.name);
            tableRow.put("table_name", table.name);
            tableRow.put("bloom_filter_fp_chance", 0.01d);
            tableRow.put("caching", ImmutableMap.of("keys", "ALL", "rows_per_partition", "NONE"));
            tableRow.put("cdc", null);
            tableRow.put("comment", "");
            tableRow.put("compaction", ImmutableMap.of("class", "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy", "max_threshold", "32", "min_threshold", "4"));
            tableRow.put("compression", ImmutableMap.of("chunk_length_in_kb", "64", "class", "org.apache.cassandra.io.compress.LZ4Compressor"));
            tableRow.put("crc_check_chance", 1d);
            tableRow.put("dclocal_read_repair_chance", 0.1d);
            tableRow.put("default_time_to_live", 0);
            tableRow.put("extensions", null);
            tableRow.put("flags", ImmutableSet.of("compound"));
            tableRow.put("gc_grace_seconds", 864000);
            tableRow.put("id", UUID.randomUUID());
            tableRow.put("max_index_interval", 2048);
            tableRow.put("memtable_flush_period_in_ms", 0);
            tableRow.put("min_index_interval", 128);
            tableRow.put("read_repair_chance", 0d);
            tableRow.put("speculative_retry", "99PERCENTILE");
            allTablesRows.add(tableRow);
            Query whenSelectTable = new Query(SELECT_TABLES + " WHERE keyspace_name = '" + keyspace.name + "'  AND table_name = '" + table.name + '\'');
            SuccessResult thenReturnTable = new SuccessResult(Collections.singletonList(tableRow), new LinkedHashMap<>(TABLE_COLUMNS));
            RequestPrime primeTable = new RequestPrime(whenSelectTable, thenReturnTable);
            simulacron.prime(new Prime(primeTable));
            List<LinkedHashMap<String, Object>> tableColumnsRows = new ArrayList<>();
            int position = 0;
            for (Column column : table.partitionKey) {
                LinkedHashMap<String, Object> columnRow = new LinkedHashMap<>();
                columnRow.put("keyspace_name", keyspace.name);
                columnRow.put("table_name", table.name);
                columnRow.put("column_name", column.name);
                columnRow.put("clustering_order", "none");
                columnRow.put("column_name_bytes", column.name.getBytes(StandardCharsets.UTF_8));
                columnRow.put("kind", "partition_key");
                columnRow.put("position", position++);
                columnRow.put("type", column.getTypeAsString());
                tableColumnsRows.add(columnRow);
            }
            position = 0;
            for (Column column : table.clusteringColumns) {
                LinkedHashMap<String, Object> columnRow = new LinkedHashMap<>();
                columnRow.put("keyspace_name", keyspace.name);
                columnRow.put("table_name", table.name);
                columnRow.put("column_name", column.name);
                columnRow.put("clustering_order", "asc");
                columnRow.put("column_name_bytes", column.name.getBytes(StandardCharsets.UTF_8));
                columnRow.put("kind", "clustering");
                columnRow.put("position", position++);
                columnRow.put("type", column.getTypeAsString());
                tableColumnsRows.add(columnRow);
            }
            for (Column column : table.otherColumns) {
                LinkedHashMap<String, Object> columnRow = new LinkedHashMap<>();
                columnRow.put("keyspace_name", keyspace.name);
                columnRow.put("table_name", table.name);
                columnRow.put("column_name", column.name);
                columnRow.put("clustering_order", "none");
                columnRow.put("column_name_bytes", column.name.getBytes(StandardCharsets.UTF_8));
                columnRow.put("kind", "regular");
                columnRow.put("position", -1);
                columnRow.put("type", column.getTypeAsString());
                tableColumnsRows.add(columnRow);
            }
            Query whenSelectTableColumns = new Query(SELECT_COLUMNS + " WHERE keyspace_name = '" + keyspace.name + "'  AND table_name = '" + table.name + '\'');
            SuccessResult thenReturnTableColumns = new SuccessResult(tableColumnsRows, new LinkedHashMap<>(TABLE_COLUMNS));
            RequestPrime primeAllTableColumns = new RequestPrime(whenSelectTableColumns, thenReturnTableColumns);
            simulacron.prime(new Prime(primeAllTableColumns));
            allColumnsRows.addAll(tableColumnsRows);
            // INSERT INTO table
            Query whenInsertIntoTable = new Query(String.format("INSERT INTO %s.%s (%s) VALUES (%s)", asCql(keyspace.name), asCql(table.name), table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA), table.allColumns().stream().map(col -> ":" + asCql(col.name)).collect(COMMA)), emptyList(), new LinkedHashMap<String, Object>(), table.allColumnTypes());
            simulacron.prime(new Prime(new RequestPrime(whenInsertIntoTable, new SuccessResult(emptyList(), new LinkedHashMap<String, String>()))));
            // UPDATE table
            Query whenUpdateIntoTable = new Query(String.format("UPDATE %s.%s SET %s", asCql(keyspace.name), asCql(table.name), table.allColumns().stream().map(col -> asCql(col.name) + "=:" + asCql(col.name)).collect(COMMA)), emptyList(), new LinkedHashMap<String, Object>(), table.allColumnTypes());
            simulacron.prime(new Prime(new RequestPrime(whenUpdateIntoTable, new SuccessResult(emptyList(), new LinkedHashMap<String, String>()))));
            // SELECT cols from table
            Query whenSelectFromTable = new Query(String.format("SELECT %s FROM %s.%s", table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA), asCql(keyspace.name), asCql(table.name)));
            simulacron.prime(new Prime(new RequestPrime(whenSelectFromTable, new SuccessResult(table.rows, table.allColumnTypes()))));
            // SELECT from table WHERE token...
            Query whenSelectFromTableWhere = new Query(String.format("SELECT %s FROM %s.%s WHERE token(%s) > ? AND token(%s) <= ?", table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA), asCql(keyspace.name), asCql(table.name), table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA), table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA)));
            simulacron.prime(new Prime(new RequestPrime(whenSelectFromTableWhere, new SuccessResult(table.rows, table.allColumnTypes()))));
            whenSelectFromTableWhere = new Query(String.format("SELECT %s FROM %s.%s WHERE token(%s) > :start AND token(%s) <= :end", table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA), asCql(keyspace.name), asCql(table.name), table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA), table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA)));
            simulacron.prime(new Prime(new RequestPrime(whenSelectFromTableWhere, new SuccessResult(table.rows, table.allColumnTypes()))));
        }
    }
    Query whenSelectAllKeyspaces = new Query(SELECT_KEYSPACES);
    SuccessResult thenReturnAllKeyspaces = new SuccessResult(allKeyspacesRows, new LinkedHashMap<>(KEYSPACE_COLUMNS));
    RequestPrime primeAllKeyspaces = new RequestPrime(whenSelectAllKeyspaces, thenReturnAllKeyspaces);
    simulacron.prime(new Prime(primeAllKeyspaces));
    Query whenSelectAllTables = new Query(SELECT_TABLES);
    SuccessResult thenReturnAllTables = new SuccessResult(allTablesRows, new LinkedHashMap<>(TABLE_COLUMNS));
    RequestPrime primeAllTables = new RequestPrime(whenSelectAllTables, thenReturnAllTables);
    simulacron.prime(new Prime(primeAllTables));
    Query whenSelectAllColumns = new Query(SELECT_COLUMNS);
    SuccessResult thenReturnAllColumns = new SuccessResult(allColumnsRows, new LinkedHashMap<>(COLUMN_COLUMNS));
    RequestPrime primeAllColumns = new RequestPrime(whenSelectAllColumns, thenReturnAllColumns);
    simulacron.prime(new Prime(primeAllColumns));
}
Also used : Prime(com.datastax.oss.simulacron.common.stubbing.Prime) BoundCluster(com.datastax.oss.simulacron.server.BoundCluster) Arrays(java.util.Arrays) CqlIdentifier(com.datastax.oss.driver.api.core.CqlIdentifier) ImmutableSet(com.datastax.oss.driver.shaded.guava.common.collect.ImmutableSet) HashMap(java.util.HashMap) Sets(org.assertj.core.util.Sets) ImmutableMap(com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap) ArrayList(java.util.ArrayList) RequestPrime(com.datastax.oss.simulacron.common.cluster.RequestPrime) LinkedHashMap(java.util.LinkedHashMap) ErrorResult(com.datastax.oss.simulacron.common.result.ErrorResult) Map(java.util.Map) ServerErrorResult(com.datastax.oss.simulacron.common.result.ServerErrorResult) Collector(java.util.stream.Collector) SimpleEntry(java.util.AbstractMap.SimpleEntry) Query(com.datastax.oss.simulacron.common.request.Query) SuccessResult(com.datastax.oss.simulacron.common.result.SuccessResult) Collections.emptyList(java.util.Collections.emptyList) DataType(com.datastax.oss.driver.api.core.type.DataType) UUID(java.util.UUID) InetSocketAddress(java.net.InetSocketAddress) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) DefaultProtocolVersion(com.datastax.oss.driver.api.core.DefaultProtocolVersion) List(java.util.List) BoundNode(com.datastax.oss.simulacron.server.BoundNode) Collections(java.util.Collections) TEXT(com.datastax.oss.driver.api.core.type.DataTypes.TEXT) Prime(com.datastax.oss.simulacron.common.stubbing.Prime) RequestPrime(com.datastax.oss.simulacron.common.cluster.RequestPrime) Query(com.datastax.oss.simulacron.common.request.Query) ArrayList(java.util.ArrayList) SuccessResult(com.datastax.oss.simulacron.common.result.SuccessResult) LinkedHashMap(java.util.LinkedHashMap) RequestPrime(com.datastax.oss.simulacron.common.cluster.RequestPrime)
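
Outside of primeTables, the same building blocks prime a single query in a few lines. A minimal sketch using only constructors that appear above; the helper, table name, and parameter shapes are illustrative:

static void primeSelect(BoundCluster simulacron,
                        List<LinkedHashMap<String, Object>> rows,
                        LinkedHashMap<String, String> columnTypes) {
    // Mirrors the "SELECT cols from table" prime above, for one hard-coded table:
    // any matching SELECT returns the given rows with the given column types.
    Query whenSelect = new Query("SELECT a, b FROM ks1.table1");
    SuccessResult thenReturnRows = new SuccessResult(rows, columnTypes);
    simulacron.prime(new Prime(new RequestPrime(whenSelect, thenReturnRows)));
}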

Aggregations

Query (com.datastax.oss.simulacron.common.request.Query)14 RequestPrime (com.datastax.oss.simulacron.common.cluster.RequestPrime)7 Test (org.junit.jupiter.api.Test)7 LinkedHashMap (java.util.LinkedHashMap)6 SuccessResult (com.datastax.oss.simulacron.common.result.SuccessResult)5 ArrayList (java.util.ArrayList)5 HashMap (java.util.HashMap)4 GenericRecord (org.apache.pulsar.client.api.schema.GenericRecord)4 InstanceState (com.datastax.oss.common.sink.state.InstanceState)3 ImmutableMap (com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap)3 ErrorResult (com.datastax.oss.simulacron.common.result.ErrorResult)3 BoundCluster (com.datastax.oss.simulacron.server.BoundCluster)3 GenericRecordImpl (com.datastax.oss.sink.pulsar.GenericRecordImpl)3 PulsarRecordImpl (com.datastax.oss.sink.pulsar.PulsarRecordImpl)3 Collections (java.util.Collections)3 List (java.util.List)3 Map (java.util.Map)3 Collectors (java.util.stream.Collectors)3 TEXT (com.datastax.oss.driver.api.core.type.DataTypes.TEXT)2 ServerErrorResult (com.datastax.oss.simulacron.common.result.ServerErrorResult)2