Use of com.datastax.oss.driver.api.core.CqlSession in the dsbulk project (DataStax) — class ClusterInformationUtilsTest, method should_get_information_about_cluster_with_one_host.
@Test
void should_get_information_about_cluster_with_one_host() {
  // given: a mocked session whose token map reports a simple partitioner
  // and whose metadata exposes exactly one node in dc1
  CqlSession session = DriverUtils.mockSession();
  Metadata metadata = session.getMetadata();
  TokenMap tokenMap = metadata.getTokenMap().get();
  when(tokenMap.getPartitionerName()).thenReturn("simple-partitioner");
  Node node = mockNode(HOST_ID_1, "1.2.3.4", "dc1");
  when(metadata.getNodes()).thenReturn(ImmutableMap.of(HOST_ID_1, node));
  // when: cluster information is extracted from the session
  ClusterInformation info = ClusterInformationUtils.getInfoAboutCluster(session);
  // then: the single data center, the partitioner, the node count and the
  // formatted node description are all reported
  String expectedNodeInfo =
      "address: 1.2.3.4:9042, dseVersion: 6.7.0, cassandraVersion: 3.11.1, dataCenter: dc1";
  assertThat(info.getDataCenters()).containsOnly("dc1");
  assertThat(info.getPartitioner()).isEqualTo("simple-partitioner");
  assertThat(info.getNumberOfNodes()).isEqualTo(1);
  assertThat(info.getNodeInfos()).isEqualTo(Collections.singletonList(expectedNodeInfo));
}
Use of com.datastax.oss.driver.api.core.CqlSession in the dsbulk project (DataStax) — class ClusterInformationUtilsTest, method should_log_cluster_information_in_debug_mode.
@Test
void should_log_cluster_information_in_debug_mode(
    @LogCapture(value = ClusterInformationUtils.class, level = DEBUG) LogInterceptor interceptor) {
  // given: a mocked session reporting more nodes (110) than the debug output
  // is willing to list individually
  CqlSession session = DriverUtils.mockSession();
  Metadata metadata = session.getMetadata();
  TokenMap tokenMap = metadata.getTokenMap().get();
  when(tokenMap.getPartitionerName()).thenReturn("simple-partitioner");
  Map<UUID, Node> nodesById =
      IntStream.rangeClosed(0, 109)
          .mapToObj(index -> mockNode(UUID.randomUUID(), "1.2.3." + index, "dc1"))
          .collect(Collectors.toMap(Node::getHostId, node -> node));
  when(metadata.getNodes()).thenReturn(nodesById);
  // when: debug info is printed for the cluster
  ClusterInformationUtils.printDebugInfoAboutCluster(session);
  // then: the log contains the summary lines and truncates the node list
  assertThat(interceptor).hasMessageContaining("Partitioner: simple-partitioner");
  assertThat(interceptor).hasMessageContaining("Total number of nodes: 110");
  assertThat(interceptor).hasMessageContaining("Nodes:");
  assertThat(interceptor).hasMessageContaining("(Other nodes omitted)");
}
Use of com.datastax.oss.driver.api.core.CqlSession in the dsbulk project (DataStax) — class ClusterInformationUtilsTest, method should_get_information_about_cluster_with_two_different_dc.
@Test
void should_get_information_about_cluster_with_two_different_dc() {
  // given: a mocked session with two nodes that live in two distinct data centers
  CqlSession session = DriverUtils.mockSession();
  Metadata metadata = session.getMetadata();
  TokenMap tokenMap = metadata.getTokenMap().get();
  when(tokenMap.getPartitionerName()).thenReturn("simple-partitioner");
  Node nodeInDc1 = mockNode(HOST_ID_1, "1.2.3.4", "dc1");
  Node nodeInDc2 = mockNode(HOST_ID_2, "1.2.3.5", "dc2");
  when(metadata.getNodes())
      .thenReturn(ImmutableMap.of(HOST_ID_1, nodeInDc1, HOST_ID_2, nodeInDc2));
  // when: cluster information is extracted from the session
  ClusterInformation info = ClusterInformationUtils.getInfoAboutCluster(session);
  // then: both data centers are listed and both nodes are described
  List<String> expectedNodeInfos =
      Arrays.asList(
          "address: 1.2.3.4:9042, dseVersion: 6.7.0, cassandraVersion: 3.11.1, dataCenter: dc1",
          "address: 1.2.3.5:9042, dseVersion: 6.7.0, cassandraVersion: 3.11.1, dataCenter: dc2");
  assertThat(info.getDataCenters()).containsExactlyInAnyOrder("dc1", "dc2");
  assertThat(info.getPartitioner()).isEqualTo("simple-partitioner");
  assertThat(info.getNumberOfNodes()).isEqualTo(2);
  assertThat(info.getNodeInfos()).isEqualTo(expectedNodeInfos);
}
Use of com.datastax.oss.driver.api.core.CqlSession in the dsbulk project (DataStax) — class SchemaSettings, method createReadStatements.
/**
 * Builds the read statements to execute for the configured query.
 *
 * <p>If the prepared query has no bind variables it is returned as a single statement; otherwise
 * the query must contain exactly one token range restriction ({@code token(...) > ?} and
 * {@code token(...) <= ?}), and one bound statement is generated per token range split.
 *
 * @param session the session whose metadata drives token range generation.
 * @return the (shuffled) list of statements to execute.
 * @throws IllegalArgumentException if the query's WHERE clause contains bind variables other than
 *     the token range bounds.
 */
public List<Statement<?>> createReadStatements(CqlSession session) {
  PreparedStatement preparedStatement = preparedStatements.get(0);
  ColumnDefinitions variables = preparedStatement.getVariableDefinitions();
  // No bind variables: the query cannot be split by token range; run it as-is.
  if (variables.size() == 0) {
    return Collections.singletonList(preparedStatement.bind());
  }
  Optional<CQLWord> start = queryInspector.getTokenRangeRestrictionStartVariable();
  Optional<CQLWord> end = queryInspector.getTokenRangeRestrictionEndVariable();
  // Valid only when both token range bounds are present...
  boolean valid = start.isPresent() && end.isPresent();
  if (valid) {
    // ...and every bind variable is one of those two bounds.
    valid =
        StreamSupport.stream(variables.spliterator(), false)
            .map(columnDefinition -> columnDefinition.getName().asInternal())
            .map(CQLWord::fromInternal)
            .allMatch(name -> name.equals(start.get()) || name.equals(end.get()));
  }
  if (!valid) {
    throw new IllegalArgumentException("The provided statement (schema.query) contains unrecognized WHERE restrictions; " + "the WHERE clause is only allowed to contain one token range restriction " + "of the form: WHERE token(...) > ? AND token(...) <= ?");
  }
  Metadata metadata = session.getMetadata();
  TokenRangeReadStatementGenerator generator =
      new TokenRangeReadStatementGenerator(table, metadata);
  List<Statement<?>> statements =
      generator.generate(
          splits,
          range ->
              preparedStatement
                  .bind()
                  .setToken(
                      queryInspector.getTokenRangeRestrictionStartVariableIndex(),
                      range.getStart())
                  .setToken(
                      queryInspector.getTokenRangeRestrictionEndVariableIndex(),
                      range.getEnd()));
  LOGGER.debug("Generated {} bound statements", statements.size());
  // Shuffle the statements to avoid hitting the same replicas sequentially when
  // the statements will be executed.
  Collections.shuffle(statements);
  return statements;
}
Use of com.datastax.oss.driver.api.core.CqlSession in the Zeppelin project (Apache) — class CassandraInterpreterTest, method should_execute_statement_with_timestamp_option.
@Test
public void should_execute_statement_with_timestamp_option() throws Exception {
  // Given: one plain insert, and one insert carrying a @timestamp directive
  // that is far in the past (15 microseconds since epoch)
  String insertV1 = "INSERT INTO zeppelin.ts(key,val) VALUES('k','v1');";
  String insertV2WithPastTimestamp =
      "@timestamp=15\n" + "INSERT INTO zeppelin.ts(key,val) VALUES('k','v2');";
  CqlSession session = EmbeddedCassandraServerHelper.getSession();
  // Insert v1 with current timestamp
  interpreter.interpret(insertV1, intrContext);
  System.out.println("going to read data from zeppelin.ts;");
  session
      .execute("SELECT val FROM zeppelin.ts LIMIT 1")
      .forEach(row -> System.out.println("row " + row));
  Thread.sleep(1);
  // When: insert v2 with a past timestamp — Cassandra should keep the newer v1
  interpreter.interpret(insertV2WithPastTimestamp, intrContext);
  System.out.println("going to read data from zeppelin.ts;");
  session
      .execute("SELECT val FROM zeppelin.ts LIMIT 1")
      .forEach(row -> System.out.println("row " + row));
  String actual = session.execute("SELECT val FROM zeppelin.ts LIMIT 1").one().getString("val");
  // Then
  assertThat(actual).isEqualTo("v1");
}
Aggregations