Use of com.datastax.driver.core.ResultSet in project apex-malhar by apache.
In the class AbstractUpsertOutputOperatorCodecsTest, the method testForSingleRowInsertWithCodecs.
@Test
public void testForSingleRowInsertWithCodecs() throws Exception {
    User aUser = new User();
    aUser.setUserid("user" + System.currentTimeMillis());
    FullName fullName = new FullName("first1" + System.currentTimeMillis(), "last1" + System.currentTimeMillis());
    aUser.setUsername(fullName);
    Address address = new Address("wer", "hjfh", 12, null);
    aUser.setCurrentaddress(address);
    UpsertExecutionContext<User> anUpdate = new UpsertExecutionContext<>();
    anUpdate.setPayload(aUser);
    userUpsertOperator.beginWindow(0);
    userUpsertOperator.input.process(anUpdate);
    userUpsertOperator.endWindow();
    ResultSet results = userUpsertOperator.session.execute("SELECT * FROM unittests.users WHERE userid = '" + aUser.getUserid() + "'");
    List<Row> rows = results.all();
    assertEquals(rows.size(), 1);
    assertTrue(results.isExhausted());
}
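A natural follow-up check is to read the UDT columns back through the driver's codec registry. The sketch below continues from the test above and assumes the operator has registered TypeCodecs for the FullName and Address UDTs; the column names username and currentaddress are inferred from the setters and may differ in the actual schema.
Row userRow = rows.get(0);
// Row.get(name, targetClass) resolves the registered codec for the UDT column (assumption: codecs registered)
FullName storedName = userRow.get("username", FullName.class);
Address storedAddress = userRow.get("currentaddress", Address.class);
assertNotNull(storedName);
assertNotNull(storedAddress);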
Use of com.datastax.driver.core.ResultSet in project apex-malhar by apache.
In the class AbstractUpsertOutputOperatorCodecsTest, the method testForListAppend.
@Test
public void testForListAppend() throws Exception {
    User aUser = new User();
    String userId = "user" + System.currentTimeMillis();
    aUser.setUserid(userId);
    FullName fullName = new FullName("first1" + System.currentTimeMillis(), "last1" + System.currentTimeMillis());
    aUser.setUsername(fullName);
    Address address = new Address("street1", "city1", 13, null);
    aUser.setCurrentaddress(address);
    Set<String> emails = new HashSet<>();
    emails.add(new String("1"));
    emails.add(new String("2"));
    aUser.setEmails(emails);
    List<Integer> topScores = new ArrayList<>();
    topScores.add(1);
    topScores.add(2);
    aUser.setTopScores(topScores);
    UpsertExecutionContext<User> originalEntry = new UpsertExecutionContext<>();
    originalEntry.setPayload(aUser);
    UpsertExecutionContext<User> subsequentUpdateForTopScores = new UpsertExecutionContext<>();
    subsequentUpdateForTopScores.setListPlacementStyle(UpsertExecutionContext.ListPlacementStyle.APPEND_TO_EXISTING_LIST);
    subsequentUpdateForTopScores.setCollectionMutationStyle(UpsertExecutionContext.CollectionMutationStyle.ADD_TO_EXISTING_COLLECTION);
    subsequentUpdateForTopScores.setNullHandlingMutationStyle(UpsertExecutionContext.NullHandlingMutationStyle.IGNORE_NULL_COLUMNS);
    User oldUser = new User();
    oldUser.setUserid(userId);
    List<Integer> topScoresAppended = new ArrayList<>();
    topScoresAppended.add(3);
    oldUser.setTopScores(topScoresAppended);
    subsequentUpdateForTopScores.setPayload(oldUser);
    userUpsertOperator.beginWindow(1);
    userUpsertOperator.input.process(originalEntry);
    userUpsertOperator.input.process(subsequentUpdateForTopScores);
    userUpsertOperator.endWindow();
    ResultSet results = userUpsertOperator.session.execute("SELECT * FROM unittests.users WHERE userid = '" + userId + "'");
    List<Row> rows = results.all();
    Row userRow = rows.get(0);
    List<Integer> topScoresEntry = userRow.getList("top_scores", Integer.class);
    assertEquals(3, topScoresEntry.size());
    assertEquals("" + 3, "" + topScoresEntry.get(2));
}
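The APPEND_TO_EXISTING_LIST plus ADD_TO_EXISTING_COLLECTION combination corresponds to a CQL list-append update. The sketch below is a rough hand-written equivalent of what the second tuple produces, not the statement the operator actually builds; the table and column names are taken from the test.
PreparedStatement appendScores = userUpsertOperator.session.prepare(
    "UPDATE unittests.users SET top_scores = top_scores + ? WHERE userid = ?");
// Appends [3] to the existing list without touching the other columns.
userUpsertOperator.session.execute(
    appendScores.bind(java.util.Collections.singletonList(3), userId));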
Use of com.datastax.driver.core.ResultSet in project ignite by apache.
In the class CassandraSessionImpl, the method execute.
/**
 * {@inheritDoc}
 */
@Override
public <R, V> R execute(BatchExecutionAssistant<R, V> assistant, Iterable<? extends V> data) {
    if (data == null || !data.iterator().hasNext())
        return assistant.processedData();
    int attempt = 0;
    String errorMsg = "Failed to execute Cassandra " + assistant.operationName() + " operation";
    Throwable error = new IgniteException(errorMsg);
    RandomSleeper sleeper = newSleeper();
    int dataSize = 0;
    incrementSessionRefs();
    try {
        while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
            if (attempt != 0) {
                log.warning("Trying " + (attempt + 1) + " attempt to execute Cassandra batch " + assistant.operationName() + " operation to process rest " + (dataSize - assistant.processedCount()) + " of " + dataSize + " elements");
            }
            // Clean errors info before the next communication with Cassandra.
            Throwable unknownEx = null;
            Throwable tblAbsenceEx = null;
            Throwable hostsAvailEx = null;
            Throwable prepStatEx = null;
            List<Cache.Entry<Integer, ResultSetFuture>> futResults = new LinkedList<>();
            PreparedStatement preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(), assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
            if (preparedSt == null)
                return null;
            int seqNum = 0;
            for (V obj : data) {
                if (!assistant.alreadyProcessed(seqNum)) {
                    try {
                        Statement statement = tuneStatementExecutionOptions(assistant.bindStatement(preparedSt, obj));
                        ResultSetFuture fut = session().executeAsync(statement);
                        futResults.add(new CacheEntryImpl<>(seqNum, fut));
                    } catch (Throwable e) {
                        if (CassandraHelper.isTableAbsenceError(e)) {
                            // If there is a table absence error and the table is not required for the operation, we can return.
                            if (!assistant.tableExistenceRequired())
                                return assistant.processedData();
                            tblAbsenceEx = e;
                            handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
                        } else if (CassandraHelper.isHostsAvailabilityError(e)) {
                            // Handle the host availability error only once per attempt.
                            if (hostsAvailEx == null)
                                handleHostsAvailabilityError(e, attempt, errorMsg);
                            hostsAvailEx = e;
                        } else if (CassandraHelper.isPreparedStatementClusterError(e)) {
                            prepStatEx = e;
                            handlePreparedStatementClusterError(e);
                            preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(), assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
                        } else
                            unknownEx = e;
                    }
                }
                seqNum++;
            }
            dataSize = seqNum;
            // For an error we don't know how to handle, do not retry; terminate.
            if (unknownEx != null)
                throw new IgniteException(errorMsg, unknownEx);
            // Remember whichever of these errors occurred.
            if (tblAbsenceEx != null)
                error = tblAbsenceEx;
            else if (hostsAvailEx != null)
                error = hostsAvailEx;
            else if (prepStatEx != null)
                error = prepStatEx;
            // Clean errors info before the next communication with Cassandra.
            unknownEx = null;
            tblAbsenceEx = null;
            hostsAvailEx = null;
            prepStatEx = null;
            for (Cache.Entry<Integer, ResultSetFuture> futureResult : futResults) {
                try {
                    ResultSet resSet = futureResult.getValue().getUninterruptibly();
                    Row row = resSet != null && resSet.iterator().hasNext() ? resSet.iterator().next() : null;
                    assistant.process(row, futureResult.getKey());
                } catch (Throwable e) {
                    if (CassandraHelper.isTableAbsenceError(e))
                        tblAbsenceEx = e;
                    else if (CassandraHelper.isHostsAvailabilityError(e))
                        hostsAvailEx = e;
                    else if (CassandraHelper.isPreparedStatementClusterError(e))
                        prepStatEx = e;
                    else
                        unknownEx = e;
                }
            }
            // For an error we don't know how to handle, do not retry; terminate.
            if (unknownEx != null)
                throw new IgniteException(errorMsg, unknownEx);
            // If no errors occurred, the operation completed successfully and we can return.
            if (tblAbsenceEx == null && hostsAvailEx == null && prepStatEx == null && assistant.processedCount() == dataSize)
                return assistant.processedData();
            if (tblAbsenceEx != null) {
                // If there is a table absence error and the table is not required for the operation, we can return.
                if (!assistant.tableExistenceRequired())
                    return assistant.processedData();
                error = tblAbsenceEx;
                handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
            }
            if (hostsAvailEx != null) {
                error = hostsAvailEx;
                handleHostsAvailabilityError(hostsAvailEx, attempt, errorMsg);
            }
            if (prepStatEx != null) {
                error = prepStatEx;
                handlePreparedStatementClusterError(prepStatEx);
            }
            if (!CassandraHelper.isTableAbsenceError(error))
                sleeper.sleep();
            attempt++;
        }
    } catch (Throwable e) {
        error = e;
    } finally {
        decrementSessionRefs();
    }
    errorMsg = "Failed to process " + (dataSize - assistant.processedCount()) + " of " + dataSize + " elements, during " + assistant.operationName() + " operation with Cassandra";
    log.error(errorMsg, error);
    throw new IgniteException(errorMsg, error);
}
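Stripped of the retry and error-classification logic, the core pattern above is: bind one statement per element, submit everything with executeAsync(), then block on each ResultSetFuture. A minimal sketch of just that pattern follows; the method and variable names (writeAll, stmt, values) are illustrative and not part of the Ignite API, and the types come from com.datastax.driver.core and java.util.
private void writeAll(Session session, PreparedStatement stmt, Iterable<Object[]> data) {
    List<ResultSetFuture> futures = new ArrayList<>();
    for (Object[] values : data)
        futures.add(session.executeAsync(stmt.bind(values)));   // fire all writes asynchronously
    for (ResultSetFuture fut : futures) {
        ResultSet rs = fut.getUninterruptibly();   // blocks; throws a DriverException on failure
        Row row = rs.one();                        // null unless the statement returns rows (e.g. an LWT applied flag)
        // a real caller would record per-element success here, as assistant.process(row, seqNum) does above
    }
}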
Use of com.datastax.driver.core.ResultSet in project ignite by apache.
In the class CassandraSessionImpl, the method execute.
/**
 * {@inheritDoc}
 */
@Override
public void execute(BatchLoaderAssistant assistant) {
    int attempt = 0;
    String errorMsg = "Failed to execute Cassandra " + assistant.operationName() + " operation";
    Throwable error = new IgniteException(errorMsg);
    RandomSleeper sleeper = newSleeper();
    incrementSessionRefs();
    try {
        while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
            if (attempt != 0)
                log.warning("Trying " + (attempt + 1) + " attempt to load Ignite cache");
            Statement statement = tuneStatementExecutionOptions(assistant.getStatement());
            try {
                ResultSetFuture fut = session().executeAsync(statement);
                ResultSet resSet = fut.getUninterruptibly();
                if (resSet == null || !resSet.iterator().hasNext())
                    return;
                for (Row row : resSet)
                    assistant.process(row);
                return;
            } catch (Throwable e) {
                error = e;
                if (CassandraHelper.isTableAbsenceError(e))
                    return;
                else if (CassandraHelper.isHostsAvailabilityError(e))
                    handleHostsAvailabilityError(e, attempt, errorMsg);
                else if (CassandraHelper.isPreparedStatementClusterError(e))
                    handlePreparedStatementClusterError(e);
                else
                    // For an error we don't know how to handle, do not retry; terminate.
                    throw new IgniteException(errorMsg, e);
            }
            sleeper.sleep();
            attempt++;
        }
    } catch (Throwable e) {
        error = e;
    } finally {
        decrementSessionRefs();
    }
    log.error(errorMsg, error);
    throw new IgniteException(errorMsg, error);
}
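The loader simply iterates the whole ResultSet and lets the driver page through it on demand. A minimal sketch of that read path, assuming driver-side paging defaults are acceptable; the table name and fetch size below are hypothetical, chosen only to illustrate the pattern.
Statement load = new SimpleStatement("SELECT * FROM my_keyspace.my_table");   // hypothetical table
load.setFetchSize(1000);   // rows per page; the driver transparently fetches further pages while iterating
ResultSet rs = session.execute(load);
for (Row row : rs) {
    // equivalent of assistant.process(row) in the Ignite code above
}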
Use of com.datastax.driver.core.ResultSet in project janusgraph by JanusGraph.
In the class CQLResultSetKeyIteratorTest, the method testIterator.
@Test
public void testIterator() throws IOException {
    final Array<Row> rows = Array.rangeClosed(1, 100).map(idx -> {
        final Row row = mock(Row.class);
        when(row.getBytes("key")).thenReturn(ByteBuffer.wrap(Integer.toString(idx / 5).getBytes()));
        when(row.getBytes("column1")).thenReturn(ByteBuffer.wrap(Integer.toString(idx % 5).getBytes()));
        when(row.getBytes("value")).thenReturn(ByteBuffer.wrap(Integer.toString(idx).getBytes()));
        return row;
    });
    final ResultSet resultSet = mock(ResultSet.class);
    when(resultSet.iterator()).thenReturn(rows.iterator());
    final CQLColValGetter getter = new CQLColValGetter(new EntryMetaData[0]);
    try (final CQLResultSetKeyIterator resultSetKeyIterator = new CQLResultSetKeyIterator(ALL_COLUMNS, getter, resultSet)) {
        int i = 0;
        while (resultSetKeyIterator.hasNext()) {
            final StaticBuffer next = resultSetKeyIterator.next();
            final RecordIterator<Entry> entries = resultSetKeyIterator.getEntries();
            while (entries.hasNext()) {
                final Row row = rows.get(i++);
                final Entry entry = entries.next();
                assertEquals(row.getBytes("key"), next.asByteBuffer());
                assertEquals(row.getBytes("column1"), entry.getColumn().asByteBuffer());
                assertEquals(row.getBytes("value"), entry.getValue().asByteBuffer());
            }
        }
    }
}
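One detail of the mock worth noting: thenReturn(rows.iterator()) hands back a single iterator instance, so a second call to resultSet.iterator() would see it already exhausted. That is fine here because the iterator is consumed only once; if the code under test iterated the ResultSet more than once, the stub could return a fresh iterator per call, for example:
// Return a new iterator on every call instead of one shared, exhaustible instance.
when(resultSet.iterator()).thenAnswer(invocation -> rows.iterator());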