Use of com.datastax.oss.simulacron.common.request.Query in project pulsar-sink by datastax.
The class SimpleEndToEndSimulacronIT, method fail_prepare_no_deletes.
@Test
void fail_prepare_no_deletes() {
  SimulacronUtils.primeTables(simulacron, schema);
  Query bad1 = makeQuery(32, "fail", 153000987000L);
  simulacron.prime(when(bad1).then(serverError("bad thing")).applyToPrepare());

  Map<String, Object> props = new HashMap<>(connectorProperties);
  props.put("topic.mytopic.ks1.table1.deletesEnabled", "false");

  Condition<Throwable> delete =
      new Condition<Throwable>("delete statement") {
        @Override
        public boolean matches(Throwable value) {
          return value.getMessage().contains(DELETE_STATEMENT);
        }
      };
  assertThatThrownBy(() -> task.open(props, sinkContext))
      .isInstanceOf(RuntimeException.class)
      .hasMessageStartingWith("Prepare failed for statement: " + INSERT_STATEMENT)
      .doesNotHave(delete);
}
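The makeQuery helper used above is defined elsewhere in SimpleEndToEndSimulacronIT and is not part of this snippet. A minimal sketch of such a helper, assuming it primes the same INSERT statement and parameter types that appear later in these snippets (makeParams and PARAM_TYPES are taken from the batch_requests snippet below; the actual body in the repository may differ):

// Hypothetical sketch: builds the Simulacron Query for the connector's INSERT into
// ks1.table1, with named values for a, b and the write timestamp.
private static Query makeQuery(int a, String b, long timestamp) {
  return new Query(
      "INSERT INTO ks1.table1(a,b) VALUES (:a,:b) USING TIMESTAMP :" + SinkUtil.TIMESTAMP_VARNAME,
      Collections.emptyList(),
      makeParams(a, b, timestamp), // assumed companion helper building the named-parameter map
      PARAM_TYPES);
}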
Use of com.datastax.oss.simulacron.common.request.Query in project pulsar-sink by datastax.
The class SimpleEndToEndSimulacronIT, method batch_requests.
//
// @ParameterizedTest
// @CsvSource({"All", "Driver"})
// void should_not_record_failure_offsets_for_driver_errors_if_ignore_errors_all_or_driver(
// String ignoreErrors) {
// SimulacronUtils.primeTables(simulacron, schema);
//
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
//
// Query bad1 = makeQuery(32, "fail", 153000987000L);
// simulacron.prime(when(bad1).then(serverError("bad thing")).delay(500, TimeUnit.MILLISECONDS));
//
// Query good2 = makeQuery(22, "success", 153000987000L);
// simulacron.prime(when(good2).then(noRows()));
//
// Query bad2 = makeQuery(12, "fail2", 153000987000L);
// simulacron.prime(when(bad2).then(serverError("bad thing")));
//
// Query bad3 = makeQuery(2, "fail3", 153000987000L);
// simulacron.prime(when(bad3).then(serverError("bad thing")));
//
// Map<String, String> connectorPropertiesIgnoreErrors =
// new ImmutableMap.Builder<String, String>()
// .putAll(connectorProperties)
// .put("ignoreErrors", ignoreErrors)
// .build();
//
// conn.start(connectorPropertiesIgnoreErrors);
//
// Record<GenericRecord> record1 = makeRecord(42, "the answer", 153000987L, 1234);
// Record<GenericRecord> record2 = makeRecord(32, "fail", 153000987L, 1235);
// Record<GenericRecord> record3 = makeRecord(22, "success", 153000987L, 1236);
// Record<GenericRecord> record4 = makeRecord(12, "fail2", 153000987L, 1237);
//
// // Make a bad record in a different partition.
// Record<GenericRecord> record5 = makeRecord(1, 2, "fail3", 153000987L, 1238);
// runTaskWithRecords(record1, record2, record3, record4, record5);
//
// // Verify that we get an error offset for the first record that failed in partition 0 (1235)
// // even though its failure was discovered after 1237. Also, 1238 belongs to a different
// // partition, so it should be included.
// Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
// task.preCommit(currentOffsets);
// assertThat(currentOffsets).isEmpty();
//
// assertThat(logs.getAllMessagesAsString())
// .contains("Error inserting/updating row for Pulsar record Record<GenericRecord>{kafkaOffset=1237")
// .contains("Error inserting/updating row for Pulsar record Record<GenericRecord>{kafkaOffset=1238")
// .contains(
// "statement: INSERT INTO ks1.table1(a,b) VALUES (:a,:b) USING TIMESTAMP :"
// + SinkUtil.TIMESTAMP_VARNAME);
// InstanceState instanceState = task.getInstanceState();
// assertThat(instanceState.getFailedRecordCounter("mytopic", "ks1.table1").getCount())
// .isEqualTo(3);
// assertThat(instanceState.getRecordCounter("mytopic", "ks1.table1").getCount()).isEqualTo(5);
// }
//
// @Test
// void success_offset() {
// SimulacronUtils.primeTables(simulacron, schema);
//
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
//
// Query good2 = makeQuery(22, "success", 153000987000L);
// simulacron.prime(when(good2).then(noRows()));
//
// conn.start(connectorProperties);
//
// Record<GenericRecord> record1 = makeRecord(42, "the answer", 153000987L, 1234);
// Record<GenericRecord> record2 = makeRecord(22, "success", 153000987L, 1235);
// runTaskWithRecords(record1, record2);
//
// Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
// task.preCommit(currentOffsets);
// assertThat(currentOffsets).isEmpty();
//
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// assertThat(queryList.size()).isEqualTo(2);
// assertThat(queryList.get(0).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(1).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// }
//
// /** Test for KAF-72 */
// @Test
// void should_record_counters_per_topic_ks_table() {
// SimulacronUtils.primeTables(simulacron, schema);
//
// Query good1topic1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1topic1).then(noRows()));
//
// Query good2topic1 = makeQuery(22, "success", 153000987000L);
// simulacron.prime(when(good2topic1).then(noRows()));
//
// Query good1topic2 = makeTtlQuery(22, "success", 153000987000L, 22L);
// simulacron.prime(when(good1topic2).then(noRows()));
//
// Query good2topic2 = makeTtlQuery(33, "success_2", 153000987000L, 33L);
// simulacron.prime(when(good2topic2).then(noRows()));
//
// conn.start(connectorProperties);
//
// Record<GenericRecord> record1topic1 = makeRecord(42, "the answer", 153000987L, 1234);
// Record<GenericRecord> record2topic1 = makeRecord(22, "success", 153000987L, 1235);
// Record<GenericRecord> record1topic2 = makeTtlRecord(22, "success", 153000987L, 1235);
// Record<GenericRecord> record2topic2 = makeTtlRecord(33, "success_2", 153000987L, 1235);
//
// runTaskWithRecords(record1topic1, record2topic1, record1topic2, record2topic2);
//
// Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
// task.preCommit(currentOffsets);
// assertThat(currentOffsets).isEmpty();
//
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// assertThat(queryList.size()).isEqualTo(4);
// assertThat(queryList.get(0).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(1).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(2).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// assertThat(queryList.get(3).getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
//
// InstanceState instanceState = task.getInstanceState();
// assertThat(instanceState.getRecordCounter("mytopic", "ks1.table1").getCount()).isEqualTo(2);
// assertThat(instanceState.getRecordCounter("mytopic_with_ttl", "ks1.table1_with_ttl").getCount())
// .isEqualTo(2);
// }
//
// @Test
// void consistency_level() {
// SimulacronUtils.primeTables(simulacron, schema);
//
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
//
// Query good2 =
// new Query(
// "INSERT INTO ks1.table2(a,b) VALUES (:a,:b) USING TIMESTAMP :" + SinkUtil.TIMESTAMP_VARNAME,
// Collections.emptyList(),
// makeParams(22, "success", 153000987000L),
// PARAM_TYPES);
// simulacron.prime(when(good2).then(noRows()));
//
// conn.start(connectorProperties);
//
// Record<GenericRecord> record1 = makeRecord(42, "the answer", 153000987L, 1234);
//
// // Put the second record in "yourtopic", which has QUORUM CL.
// Record<GenericRecord> record2 =
// new Record<GenericRecord>(
// "yourtopic",
// 0,
// null,
// 22,
// null,
// "success",
// 1235L,
// 153000987L,
// TimestampType.CREATE_TIME);
// runTaskWithRecords(record1, record2);
//
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// assertThat(queryList.size()).isEqualTo(2);
//
// for (QueryLog queryLog : queryList) {
// if (queryLog.getQuery().contains("table1")) {
// assertThat(queryLog.getConsistency()).isEqualTo(ConsistencyLevel.LOCAL_ONE);
// } else if (queryLog.getQuery().contains("table2")) {
// assertThat(queryLog.getConsistency()).isEqualTo(ConsistencyLevel.QUORUM);
// } else {
// fail("%s is not for table1 nor table2!", queryLog.toString());
// }
// }
// }
//
// @Test
// void undefined_topic() {
// SimulacronUtils.primeTables(simulacron, schema);
//
// Query good1 = makeQuery(42, "the answer", 153000987000L);
// simulacron.prime(when(good1).then(noRows()));
//
// conn.start(connectorProperties);
//
// Record<GenericRecord> goodRecord = makeRecord(42, "the answer", 153000987L, 1234);
//
// Record<GenericRecord> badRecord =
// new Record<GenericRecord>("unknown", 0, null, 42L, null, 42, 1234L);
// runTaskWithRecords(goodRecord, badRecord);
// assertThat(logs.getAllMessagesAsString())
// .contains("Error decoding/mapping Kafka record Record<GenericRecord>{kafkaOffset=1234")
// .contains(
// "Connector has no configuration for record topic 'unknown'. Please update the configuration and restart.");
//
// // Verify that the insert for good1 was issued.
// List<QueryLog> queryList =
// simulacron
// .node(0)
// .getLogs()
// .getQueryLogs()
// .stream()
// .filter(q -> q.getType().equals("EXECUTE"))
// .collect(Collectors.toList());
// byte[] secondParam = new byte[10];
// ((Execute) queryList.get(0).getFrame().message)
// .options
// .positionalValues
// .get(1)
// .get(secondParam);
// assertThat(new String(secondParam, StandardCharsets.UTF_8)).isEqualTo("the answer");
// }
//
@Test
void batch_requests() {
  // Insert 5 records: 2 from mytopic, 3 from yourtopic. Verify that they batch properly and
  // with the correct CLs.
  // Even though we will not be executing simple statements in this test, we must specify
  // that we will, so that Simulacron handles preparing our statement properly.
  SimulacronUtils.primeTables(simulacron, schema);
  Query good1 = makeQuery(42, "the answer", 153000987000L);
  simulacron.prime(when(good1).then(noRows()));
  Query good2 = new Query(
      "INSERT INTO ks1.table2(a,b) VALUES (:a,:b) USING TIMESTAMP :" + SinkUtil.TIMESTAMP_VARNAME,
      Collections.emptyList(),
      makeParams(42, "topic2 success1", 153000987000L),
      PARAM_TYPES);
  simulacron.prime(when(good2).then(noRows()));
  taskConfigs.add(connectorProperties);

  Record<GenericRecord> goodRecord1 = makeRecord(42, "the answer", 153000987L, 1234);
  Record<GenericRecord> goodRecord2 = makeRecord(42, "the second answer", 153000987L, 1234);
  Record<GenericRecord> goodRecord3 = new PulsarRecordImpl(
      "persistent://tenant/namespace/yourtopic", "42",
      new GenericRecordImpl().put("field1", "topic2 success1"), recordType, 153000987L);
  Record<GenericRecord> goodRecord4 = new PulsarRecordImpl(
      "persistent://tenant/namespace/yourtopic", "42",
      new GenericRecordImpl().put("field1", "topic2 success2"), recordType, 153000987L);
  Record<GenericRecord> goodRecord5 = new PulsarRecordImpl(
      "persistent://tenant/namespace/yourtopic", "42",
      new GenericRecordImpl().put("field1", "topic2 success3"), recordType, 153000987L);

  // The order of records shouldn't matter here, but we try to mix things up to emulate
  // a real workload.
  runTaskWithRecords(goodRecord1, goodRecord3, goodRecord2, goodRecord4, goodRecord5);

  // Verify that we issued two batch requests, one at LOCAL_ONE (for table1/mytopic) and
  // one at QUORUM (for table2/yourtopic). There's some pretty gnarly unwrapping of request
  // info. We distinguish one batch from the other based on the number of statements in the
  // batch.
  List<QueryLog> queryList = simulacron.node(0).getLogs().getQueryLogs().stream()
      .filter(q -> q.getType().equals("BATCH"))
      .collect(Collectors.toList());
  Map<ConsistencyLevel, Integer> queryInfo = queryList.stream()
      .map(queryLog -> (Batch) queryLog.getFrame().message)
      .collect(Collectors.toMap(
          message -> ConsistencyLevel.fromCode(message.consistency),
          message -> message.values.size()));
  assertThat(queryInfo)
      .containsOnly(entry(ConsistencyLevel.LOCAL_ONE, 2), entry(ConsistencyLevel.QUORUM, 3));

  InstanceState instanceState = task.getInstanceState();
  // Verify that there was one batch with 2 statements for mytopic.
  verifyOneBatchWithNStatements(instanceState.getBatchSizeHistogramSummary("mytopic", "ks1.table1"), 2);
  // Verify that there was one batch with 3 statements for yourtopic.
  verifyOneBatchWithNStatements(instanceState.getBatchSizeHistogramSummary("yourtopic", "ks1.table2"), 3);
  // Verify batchSizeInBytes updates for mytopic.
  verifyBatchSizeInBytesUpdate(instanceState.getBatchSizeInBytesHistogramSummary("mytopic", "ks1.table1"), 2, false);
  // Verify batchSizeInBytes updates for yourtopic.
  verifyBatchSizeInBytesUpdate(instanceState.getBatchSizeInBytesHistogramSummary("yourtopic", "ks1.table2"), 3, true);
}
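The makeParams helper assumed in good2 above is likewise defined outside this snippet. A plausible sketch, assuming it simply builds the named-parameter map matching the primed INSERT (the real helper in the repository may differ):

// Hypothetical sketch: named parameters for the primed INSERT, keyed to match
// the :a, :b and timestamp placeholders in the statement text.
private static LinkedHashMap<String, Object> makeParams(int a, String b, long timestamp) {
  LinkedHashMap<String, Object> params = new LinkedHashMap<>();
  params.put("a", a);
  params.put("b", b);
  params.put(SinkUtil.TIMESTAMP_VARNAME, timestamp);
  return params;
}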
Use of com.datastax.oss.simulacron.common.request.Query in project pulsar-sink by datastax.
The class SimpleEndToEndSimulacronIT, method fail_delete.
@Test
void fail_delete() {
  SimulacronUtils.primeTables(simulacron, schema);
  Query bad1 = new Query(
      "DELETE FROM ks1.mycounter WHERE a = :a AND b = :b",
      Collections.emptyList(),
      ImmutableMap.<String, Object>builder().put("a", 37).put("b", "delete").build(),
      ImmutableMap.<String, String>builder().put("a", "int").put("b", "varchar").build());
  simulacron.prime(when(bad1).then(serverError("bad thing")));

  Map<String, Object> connProps = new HashMap<>();
  connProps.put("name", INSTANCE_NAME);
  connProps.put("contactPoints", hostname);
  connProps.put("port", port);
  connProps.put("loadBalancing.localDc", "dc1");
  connProps.put("topic.mytopic.ks1.mycounter.mapping", "a=value.bigint, b=value.text, c=value.int");
  taskConfigs.add(connProps);

  RecordSchemaBuilder builder = org.apache.pulsar.client.api.schema.SchemaBuilder.record("MyBean");
  builder.field("bigint").type(SchemaType.INT64);
  builder.field("text").type(SchemaType.STRING);
  builder.field("int").type(SchemaType.INT32);
  Schema<GenericRecord> schema = org.apache.pulsar.client.api.Schema.generic(builder.build(SchemaType.AVRO));

  GenericRecord value = new GenericRecordImpl().put("bigint", 37L).put("text", "delete");
  Record<GenericRecord> record =
      new PulsarRecordImpl("persistent://tenant/namespace/mytopic", null, value, schema);
  runTaskWithRecords(record);

  // The log may need a little time to be updated with our error message.
  try {
    MILLISECONDS.sleep(500);
  } catch (InterruptedException e) {
    // swallow
  }
  assertThat(logs.getAllMessagesAsString())
      .contains(
          "Error inserting/updating row for Pulsar record PulsarSinkRecord{PulsarRecordImpl{topic=persistent://tenant/namespace/mytopic, value=GenericRecordImpl{values={bigint=37, text=delete}}")
      .contains("statement: DELETE FROM ks1.mycounter WHERE a = :a AND b = :b");
}
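The fixed 500 ms sleep above is the snippet's way of waiting for the asynchronous log write. Where Awaitility is available on the test classpath, the same wait can be expressed as a bounded poll instead; a sketch of that alternative (not what the repository uses):

// Sketch only, assuming org.awaitility:awaitility is on the test classpath:
// poll for up to 2 seconds rather than sleeping a fixed 500 ms.
org.awaitility.Awaitility.await()
    .atMost(java.time.Duration.ofSeconds(2))
    .untilAsserted(() ->
        assertThat(logs.getAllMessagesAsString())
            .contains("statement: DELETE FROM ks1.mycounter WHERE a = :a AND b = :b"));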
Use of com.datastax.oss.simulacron.common.request.Query in project dsbulk by datastax.
The class SimulacronUtils, method primeSystemPeersV2.
public static void primeSystemPeersV2(BoundCluster simulacron) {
  Query whenSelectSystemPeersV2 = new Query(SELECT_SYSTEM_PEERS_V2);
  ErrorResult thenThrowServerError =
      new ServerErrorResult("Unknown keyspace/cf pair (system.peers_v2)");
  RequestPrime primeSystemPeersV2 = new RequestPrime(whenSelectSystemPeersV2, thenThrowServerError);
  simulacron.prime(new Prime(primeSystemPeersV2));
}
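Callers invoke this before any driver session connects, so that the driver's topology query against system.peers_v2 fails and it falls back to the legacy system.peers table. A minimal usage sketch; the cluster bootstrap shown is illustrative:

// Illustrative usage with Simulacron's standard server/cluster bootstrap.
Server server = Server.builder().build();
BoundCluster simulacron = server.register(ClusterSpec.builder().withNodes(1).build());
SimulacronUtils.primeSystemPeersV2(simulacron);
// ... now connect the driver/session against simulacron's contact points.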
Use of com.datastax.oss.simulacron.common.request.Query in project dsbulk by datastax.
The class SimulacronUtils, method primeTables.
public static void primeTables(BoundCluster simulacron, Keyspace... keyspaces) {
  List<LinkedHashMap<String, Object>> allKeyspacesRows = new ArrayList<>();
  List<LinkedHashMap<String, Object>> allTablesRows = new ArrayList<>();
  List<LinkedHashMap<String, Object>> allColumnsRows = new ArrayList<>();
  for (Keyspace keyspace : keyspaces) {
    LinkedHashMap<String, Object> keyspaceRow = new LinkedHashMap<>();
    keyspaceRow.put("keyspace_name", keyspace.name);
    keyspaceRow.put("durable_writes", true);
    keyspaceRow.put(
        "replication",
        ImmutableMap.of("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1"));
    allKeyspacesRows.add(keyspaceRow);

    Query whenSelectKeyspace =
        new Query(SELECT_KEYSPACES + " WHERE keyspace_name = '" + keyspace.name + '\'');
    SuccessResult thenReturnKeyspace =
        new SuccessResult(Collections.singletonList(keyspaceRow), new LinkedHashMap<>(KEYSPACE_COLUMNS));
    RequestPrime primeKeyspace = new RequestPrime(whenSelectKeyspace, thenReturnKeyspace);
    simulacron.prime(new Prime(primeKeyspace));
    for (Table table : keyspace.tables) {
      LinkedHashMap<String, Object> tableRow = new LinkedHashMap<>();
      tableRow.put("keyspace_name", keyspace.name);
      tableRow.put("table_name", table.name);
      tableRow.put("bloom_filter_fp_chance", 0.01d);
      tableRow.put("caching", ImmutableMap.of("keys", "ALL", "rows_per_partition", "NONE"));
      tableRow.put("cdc", null);
      tableRow.put("comment", "");
      tableRow.put(
          "compaction",
          ImmutableMap.of(
              "class", "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy",
              "max_threshold", "32",
              "min_threshold", "4"));
      tableRow.put(
          "compression",
          ImmutableMap.of("chunk_length_in_kb", "64", "class", "org.apache.cassandra.io.compress.LZ4Compressor"));
      tableRow.put("crc_check_chance", 1d);
      tableRow.put("dclocal_read_repair_chance", 0.1d);
      tableRow.put("default_time_to_live", 0);
      tableRow.put("extensions", null);
      tableRow.put("flags", ImmutableSet.of("compound"));
      tableRow.put("gc_grace_seconds", 864000);
      tableRow.put("id", UUID.randomUUID());
      tableRow.put("max_index_interval", 2048);
      tableRow.put("memtable_flush_period_in_ms", 0);
      tableRow.put("min_index_interval", 128);
      tableRow.put("read_repair_chance", 0d);
      tableRow.put("speculative_retry", "99PERCENTILE");
      allTablesRows.add(tableRow);

      Query whenSelectTable = new Query(
          SELECT_TABLES + " WHERE keyspace_name = '" + keyspace.name
              + "' AND table_name = '" + table.name + '\'');
      SuccessResult thenReturnTable =
          new SuccessResult(Collections.singletonList(tableRow), new LinkedHashMap<>(TABLE_COLUMNS));
      RequestPrime primeTable = new RequestPrime(whenSelectTable, thenReturnTable);
      simulacron.prime(new Prime(primeTable));
      List<LinkedHashMap<String, Object>> tableColumnsRows = new ArrayList<>();
      int position = 0;
      for (Column column : table.partitionKey) {
        LinkedHashMap<String, Object> columnRow = new LinkedHashMap<>();
        columnRow.put("keyspace_name", keyspace.name);
        columnRow.put("table_name", table.name);
        columnRow.put("column_name", column.name);
        columnRow.put("clustering_order", "none");
        columnRow.put("column_name_bytes", column.name.getBytes(StandardCharsets.UTF_8));
        columnRow.put("kind", "partition_key");
        columnRow.put("position", position++);
        columnRow.put("type", column.getTypeAsString());
        tableColumnsRows.add(columnRow);
      }
      position = 0;
      for (Column column : table.clusteringColumns) {
        LinkedHashMap<String, Object> columnRow = new LinkedHashMap<>();
        columnRow.put("keyspace_name", keyspace.name);
        columnRow.put("table_name", table.name);
        columnRow.put("column_name", column.name);
        columnRow.put("clustering_order", "asc");
        columnRow.put("column_name_bytes", column.name.getBytes(StandardCharsets.UTF_8));
        columnRow.put("kind", "clustering");
        columnRow.put("position", position++);
        columnRow.put("type", column.getTypeAsString());
        tableColumnsRows.add(columnRow);
      }
      for (Column column : table.otherColumns) {
        LinkedHashMap<String, Object> columnRow = new LinkedHashMap<>();
        columnRow.put("keyspace_name", keyspace.name);
        columnRow.put("table_name", table.name);
        columnRow.put("column_name", column.name);
        columnRow.put("clustering_order", "none");
        columnRow.put("column_name_bytes", column.name.getBytes(StandardCharsets.UTF_8));
        columnRow.put("kind", "regular");
        columnRow.put("position", -1);
        columnRow.put("type", column.getTypeAsString());
        tableColumnsRows.add(columnRow);
      }
      Query whenSelectTableColumns = new Query(
          SELECT_COLUMNS + " WHERE keyspace_name = '" + keyspace.name
              + "' AND table_name = '" + table.name + '\'');
      SuccessResult thenReturnTableColumns =
          new SuccessResult(tableColumnsRows, new LinkedHashMap<>(TABLE_COLUMNS));
      RequestPrime primeAllTableColumns = new RequestPrime(whenSelectTableColumns, thenReturnTableColumns);
      simulacron.prime(new Prime(primeAllTableColumns));
      allColumnsRows.addAll(tableColumnsRows);
      // INSERT INTO table
      Query whenInsertIntoTable = new Query(
          String.format("INSERT INTO %s.%s (%s) VALUES (%s)",
              asCql(keyspace.name), asCql(table.name),
              table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA),
              table.allColumns().stream().map(col -> ":" + asCql(col.name)).collect(COMMA)),
          emptyList(), new LinkedHashMap<String, Object>(), table.allColumnTypes());
      simulacron.prime(new Prime(new RequestPrime(
          whenInsertIntoTable, new SuccessResult(emptyList(), new LinkedHashMap<String, String>()))));
      // UPDATE table
      Query whenUpdateIntoTable = new Query(
          String.format("UPDATE %s.%s SET %s",
              asCql(keyspace.name), asCql(table.name),
              table.allColumns().stream().map(col -> asCql(col.name) + "=:" + asCql(col.name)).collect(COMMA)),
          emptyList(), new LinkedHashMap<String, Object>(), table.allColumnTypes());
      simulacron.prime(new Prime(new RequestPrime(
          whenUpdateIntoTable, new SuccessResult(emptyList(), new LinkedHashMap<String, String>()))));
      // SELECT cols from table
      Query whenSelectFromTable = new Query(
          String.format("SELECT %s FROM %s.%s",
              table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA),
              asCql(keyspace.name), asCql(table.name)));
      simulacron.prime(new Prime(new RequestPrime(
          whenSelectFromTable, new SuccessResult(table.rows, table.allColumnTypes()))));
      // SELECT from table WHERE token...
      Query whenSelectFromTableWhere = new Query(
          String.format("SELECT %s FROM %s.%s WHERE token(%s) > ? AND token(%s) <= ?",
              table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA),
              asCql(keyspace.name), asCql(table.name),
              table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA),
              table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA)));
      simulacron.prime(new Prime(new RequestPrime(
          whenSelectFromTableWhere, new SuccessResult(table.rows, table.allColumnTypes()))));
      whenSelectFromTableWhere = new Query(
          String.format("SELECT %s FROM %s.%s WHERE token(%s) > :start AND token(%s) <= :end",
              table.allColumns().stream().map(col -> asCql(col.name)).collect(COMMA),
              asCql(keyspace.name), asCql(table.name),
              table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA),
              table.partitionKey.stream().map(col -> asCql(col.name)).collect(COMMA)));
      simulacron.prime(new Prime(new RequestPrime(
          whenSelectFromTableWhere, new SuccessResult(table.rows, table.allColumnTypes()))));
    }
  }
  Query whenSelectAllKeyspaces = new Query(SELECT_KEYSPACES);
  SuccessResult thenReturnAllKeyspaces =
      new SuccessResult(allKeyspacesRows, new LinkedHashMap<>(KEYSPACE_COLUMNS));
  RequestPrime primeAllKeyspaces = new RequestPrime(whenSelectAllKeyspaces, thenReturnAllKeyspaces);
  simulacron.prime(new Prime(primeAllKeyspaces));
  Query whenSelectAllTables = new Query(SELECT_TABLES);
  SuccessResult thenReturnAllTables = new SuccessResult(allTablesRows, new LinkedHashMap<>(TABLE_COLUMNS));
  RequestPrime primeAllTables = new RequestPrime(whenSelectAllTables, thenReturnAllTables);
  simulacron.prime(new Prime(primeAllTables));
  Query whenSelectAllColumns = new Query(SELECT_COLUMNS);
  SuccessResult thenReturnAllColumns = new SuccessResult(allColumnsRows, new LinkedHashMap<>(COLUMN_COLUMNS));
  RequestPrime primeAllColumns = new RequestPrime(whenSelectAllColumns, thenReturnAllColumns);
  simulacron.prime(new Prime(primeAllColumns));
}
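A typical caller builds the Keyspace/Table/Column fixtures that this method iterates over and primes everything in one call. A sketch of such usage; the exact fixture constructor shapes are assumptions here (the fields keyspace.tables, table.partitionKey, etc. referenced above only imply fixtures of roughly this form), and DataTypes is the Java driver's type registry:

// Illustrative usage: prime one keyspace with one two-column table, after which any
// INSERT/UPDATE/SELECT the tests issue against ks1.table1 succeeds with no rows.
SimulacronUtils.primeTables(
    simulacron,
    new SimulacronUtils.Keyspace(
        "ks1",
        new SimulacronUtils.Table(
            "table1",
            new SimulacronUtils.Column("a", DataTypes.INT),
            new SimulacronUtils.Column("b", DataTypes.TEXT))));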