Use of com.datastax.driver.core.Row in the project flink by apache: the class CassandraConnectorITCase, method verifyResultsDataPersistenceUponMissedNotify.
@Override
protected void verifyResultsDataPersistenceUponMissedNotify(CassandraTupleWriteAheadSink<Tuple3<String, Integer, Integer>> sink) {
    // Read back everything the write-ahead sink persisted and verify that all
    // 60 expected counter values (1..60) are present.
    ResultSet result = session.execute(SELECT_DATA_QUERY);
    ArrayList<Integer> list = new ArrayList<>();
    for (int x = 1; x <= 60; x++) {
        list.add(x);
    }
    for (Row s : result) {
        // Integer.valueOf replaces the deprecated new Integer(...) constructor.
        // Explicit boxing is still required so the remove(Object) overload is
        // chosen, not remove(int index).
        list.remove(Integer.valueOf(s.getInt("counter")));
    }
    // Any value left in the list was never seen in the result set.
    Assert.assertTrue("The following ID's were not found in the ResultSet: " + list.toString(), list.isEmpty());
}
Use of com.datastax.driver.core.Row in the project stargate-core by tuplejump: the class AggregatesTest, method shouldReturnJSONString.
// @Test
// @Test
public void shouldReturnJSONString() throws Exception {
    try {
        createEventStoreSchema(keyspace);
        // Aggregate query: sum over measures.connection, issued through the
        // stargate index column.
        String quantileQuery = "SELECT stargate FROM " + keyspace + ".event_store WHERE stargate = '{ function:{ type:\"aggregate\", aggregates:[{type:\"sum\",field:\"measures.connection\"}] }}' ;";
        String data = getSession().execute(quantileQuery).one().getString("stargate");
        String expectedResult = "{\"groups\":[{\"group\":{},\"aggregations\":[{\"sum\":695.0}]}]}";
        // Compare as parsed JSON trees so formatting and key ordering
        // differences cannot cause a spurious failure.
        ObjectMapper jsonMapper = new ObjectMapper();
        Assert.assertEquals(jsonMapper.readTree(expectedResult), jsonMapper.readTree(data));
    } finally {
        // Always drop the keyspace, even when the assertion fails.
        dropKS(keyspace);
    }
}
Use of com.datastax.driver.core.Row in the project stargate-core by tuplejump: the class IndexTestBase, method printResultSet.
/**
 * Iterates the given result set, optionally printing each row, and asserts
 * that no row's string form mentions "error".
 *
 * @param log    whether to print each row to stdout
 * @param result the result set to consume (fully iterated by this call)
 * @return the number of rows seen
 */
protected int printResultSet(boolean log, ResultSet result) {
    int count = 0;
    for (Row row : result) {
        String rowStr = row.toString();
        if (log) {
            System.out.println(rowStr);
        }
        // contains(...) replaces indexOf(...) > 0, which silently ignored a
        // match at index 0.
        Assert.assertFalse(rowStr.contains("error"));
        count++;
    }
    return count;
}
Use of com.datastax.driver.core.Row in the project YCSB by brianfrankcooper: the class CassandraCQLClient, method scan.
/**
 * Perform a range scan for a set of records in the database. Each field/value
 * pair from the result will be stored in a HashMap.
 *
 * Cassandra CQL uses "token" method for range scan which doesn't always yield
 * intuitive results.
 *
 * @param table
 *          The name of the table
 * @param startkey
 *          The record key of the first record to read.
 * @param recordcount
 *          The number of records to read
 * @param fields
 *          The list of fields to read, or null for all of them
 * @param result
 *          A Vector of HashMaps, where each HashMap is a set field/value
 *          pairs for one record
 * @return Zero on success, a non-zero error code on error
 */
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
    try {
        Select.Builder selectBuilder;
        if (fields == null) {
            selectBuilder = QueryBuilder.select().all();
        } else {
            selectBuilder = QueryBuilder.select();
            for (String col : fields) {
                ((Select.Selection) selectBuilder).column(col);
            }
        }
        Statement stmt = selectBuilder.from(table);
        // The statement builder is not setup right for tokens.
        // So, we need to build it manually: strip the trailing ';' from the
        // generated CQL and append the token-based WHERE clause by hand.
        String initialStmt = stmt.toString();
        StringBuilder scanStmt = new StringBuilder();
        scanStmt.append(initialStmt, 0, initialStmt.length() - 1);
        scanStmt.append(" WHERE ");
        scanStmt.append(QueryBuilder.token(YCSB_KEY));
        scanStmt.append(" >= ");
        scanStmt.append("token('");
        // NOTE(review): startkey is spliced into the CQL text. YCSB-generated
        // keys contain no quotes, but a bound parameter would be safer if keys
        // ever come from an untrusted source.
        scanStmt.append(startkey);
        scanStmt.append("')");
        scanStmt.append(" LIMIT ");
        scanStmt.append(recordcount);
        stmt = new SimpleStatement(scanStmt.toString());
        stmt.setConsistencyLevel(readConsistencyLevel);
        if (debug) {
            System.out.println(stmt.toString());
        }
        if (trace) {
            stmt.enableTracing();
        }
        ResultSet rs = session.execute(stmt);
        // ResultSet is Iterable<Row>; iterating it directly replaces the
        // equivalent isExhausted()/one() loop.
        for (Row row : rs) {
            HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>();
            for (ColumnDefinitions.Definition def : row.getColumnDefinitions()) {
                ByteBuffer val = row.getBytesUnsafe(def.getName());
                if (val == null) {
                    tuple.put(def.getName(), null);
                } else {
                    // Copy exactly the value's bytes. The previous val.array()
                    // returned the whole backing array (which may hold far more
                    // than this value, corrupting the field) and throws
                    // UnsupportedOperationException for direct buffers.
                    byte[] bytes = new byte[val.remaining()];
                    val.duplicate().get(bytes);
                    tuple.put(def.getName(), new ByteArrayByteIterator(bytes));
                }
            }
            result.add(tuple);
        }
        return Status.OK;
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Error scanning with startkey: " + startkey);
        return Status.ERROR;
    }
}
Use of com.datastax.driver.core.Row in the project aroma-data-operations by RedRoma: the class MappersTest, method rowFor.
// Builds a mock Row whose column getters echo the given user's fields,
// for exercising the row-to-User mappers without a live cluster.
private Row rowFor(User user) {
    Row row = mock(Row.class);
    // Stub each column the mapper reads; ordering of the stubs is irrelevant.
    when(row.getString(Tables.Users.FIRST_NAME)).thenReturn(user.firstName);
    when(row.getString(Tables.Users.MIDDLE_NAME)).thenReturn(user.middleName);
    when(row.getString(Tables.Users.LAST_NAME)).thenReturn(user.lastName);
    when(row.getUUID(Tables.Users.USER_ID)).thenReturn(UUID.fromString(user.userId));
    when(row.getTimestamp(Tables.Users.BIRTH_DATE)).thenReturn(new Date(user.birthdate));
    return row;
}
Aggregations