Use of com.datastax.driver.core.BatchStatement in project aroma-data-operations by RedRoma.
Class CassandraApplicationRepository, method createDeleteStatementFor:
private Statement createDeleteStatementFor(Application app) {
    BatchStatement batch = new BatchStatement();
    UUID appId = UUID.fromString(app.applicationId);

    Statement deleteFromMainTable = QueryBuilder.delete().all()
            .from(TABLE_NAME)
            .where(eq(APP_ID, appId));
    batch.add(deleteFromMainTable);

    Statement deleteFromRecentsTable = QueryBuilder.delete().all()
            .from(TABLE_NAME_RECENTLY_CREATED)
            .where(eq(APP_ID, appId));
    batch.add(deleteFromRecentsTable);

    return batch;
}
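Note that the no-argument BatchStatement constructor creates a LOGGED batch, so both deletes apply atomically. A minimal usage sketch, assuming a driver Session named session is in scope (the caller shown here is an assumption, not part of the project):

// Build the batched delete for the application and run it as one atomic unit.
Statement delete = createDeleteStatementFor(app);
session.execute(delete);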
Use of com.datastax.driver.core.BatchStatement in project cassandra by apache.
Class BatchMetricsTest, method executeBatch:
private void executeBatch(boolean isLogged, int distinctPartitions, int statementsPerPartition) {
    BatchStatement.Type batchType = isLogged
            ? BatchStatement.Type.LOGGED
            : BatchStatement.Type.UNLOGGED;
    BatchStatement batch = new BatchStatement(batchType);
    for (int i = 0; i < distinctPartitions; i++) {
        for (int j = 0; j < statementsPerPartition; j++) {
            batch.add(ps.bind(i, "aaaaaaaa"));
        }
    }
    session.execute(batch);
}
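The test relies on a Session (session) and a PreparedStatement (ps) created elsewhere in the test fixture. A sketch of the kind of setup it implies; the keyspace, table, and column names here are assumptions:

// Assumed fixture: an INSERT with an int partition key and a text column,
// matching the ps.bind(i, "aaaaaaaa") calls above.
PreparedStatement ps = session.prepare(
        "INSERT INTO ks.batch_metrics_test (pk, val) VALUES (?, ?)");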
Use of com.datastax.driver.core.BatchStatement in project janusgraph by JanusGraph.
Class CQLStoreManager, method mutateManyLogged:
// Use a single logged batch
private void mutateManyLogged(final Map<String, Map<StaticBuffer, KCVMutation>> mutations,
                              final StoreTransaction txh) throws BackendException {
    final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
    final BatchStatement batchStatement = new BatchStatement(Type.LOGGED);
    batchStatement.setConsistencyLevel(getTransaction(txh).getWriteConsistencyLevel());

    batchStatement.addAll(Iterator.ofAll(mutations.entrySet()).flatMap(tableNameAndMutations -> {
        final String tableName = tableNameAndMutations.getKey();
        final Map<StaticBuffer, KCVMutation> tableMutations = tableNameAndMutations.getValue();
        final CQLKeyColumnValueStore columnValueStore = Option.of(this.openStores.get(tableName))
                .getOrElseThrow(() -> new IllegalStateException("Store cannot be found: " + tableName));

        return Iterator.ofAll(tableMutations.entrySet()).flatMap(keyAndMutations -> {
            final StaticBuffer key = keyAndMutations.getKey();
            final KCVMutation keyMutations = keyAndMutations.getValue();

            // Resolve the deletion/addition timestamps once, then map each
            // mutation to the corresponding DELETE or INSERT statement.
            final Iterator<Statement> deletions = Iterator.of(commitTime.getDeletionTime(this.times))
                    .flatMap(deleteTime -> Iterator.ofAll(keyMutations.getDeletions())
                            .map(deletion -> columnValueStore.deleteColumn(key, deletion, deleteTime)));
            final Iterator<Statement> additions = Iterator.of(commitTime.getAdditionTime(this.times))
                    .flatMap(addTime -> Iterator.ofAll(keyMutations.getAdditions())
                            .map(addition -> columnValueStore.insertColumn(key, addition, addTime)));
            return Iterator.concat(deletions, additions);
        });
    }));

    final Future<ResultSet> result = Future.fromJavaFuture(this.executorService,
            this.session.executeAsync(batchStatement));
    result.await();
    if (result.isFailure()) {
        throw EXCEPTION_MAPPER.apply(result.getCause().get());
    }
    sleepAfterWrite(txh, commitTime);
}
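Type.LOGGED routes the batch through Cassandra's distributed batch log, which makes the whole batch atomic (all mutations eventually apply, or none do) at the cost of extra coordinator work. For contrast, a minimal sketch of constructing both batch flavors with this driver API:

// LOGGED: atomic across partitions via the distributed batch log (more overhead).
BatchStatement logged = new BatchStatement(BatchStatement.Type.LOGGED);

// UNLOGGED: no cross-partition atomicity; cheaper, and best suited to
// statements that all target the same partition.
BatchStatement unlogged = new BatchStatement(BatchStatement.Type.UNLOGGED);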
Use of com.datastax.driver.core.BatchStatement in project atlasdb by Palantir.
Class CqlKeyValueService, method getPutPartitionResultSetFuture:
protected ResultSetFuture getPutPartitionResultSetFuture(TableReference tableRef,
                                                         List<Entry<Cell, Value>> partition,
                                                         TransactionType transactionType,
                                                         int ttl) {
    PreparedStatement preparedStatement = getPreparedStatement(
            tableRef, getPutQueryForPossibleTransaction(tableRef, transactionType, ttl), session);
    preparedStatement.setConsistencyLevel(writeConsistency);

    // Be mindful when using the atomicity semantics of UNLOGGED batch statements.
    // This usage should be okay, as KVS.multiPut explicitly does not guarantee
    // atomicity across cells (nor batch isolation, which we also cannot provide).
    BatchStatement batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED);
    if (shouldTraceQuery(tableRef)) {
        batchStatement.enableTracing();
    }
    for (Entry<Cell, Value> e : partition) {
        BoundStatement boundStatement = preparedStatement.bind();
        boundStatement.setBytes(fieldNameProvider.row(), ByteBuffer.wrap(e.getKey().getRowName()));
        boundStatement.setBytes(fieldNameProvider.column(), ByteBuffer.wrap(e.getKey().getColumnName()));
        boundStatement.setLong(fieldNameProvider.timestamp(), ~e.getValue().getTimestamp());
        boundStatement.setBytes(fieldNameProvider.value(), ByteBuffer.wrap(e.getValue().getContents()));
        if (partition.size() > 1) {
            batchStatement.add(boundStatement);
        } else {
            // Single-entry partition: skip the batch and execute the statement directly.
            return session.executeAsync(boundStatement);
        }
    }
    return session.executeAsync(batchStatement);
}
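The single-statement fast path avoids batch overhead when a partition holds exactly one entry. A caller would typically block on, or attach a callback to, the returned future; a minimal consumption sketch, assuming driver 3.x, where ResultSetFuture.getUninterruptibly() rethrows driver exceptions without checked InterruptedException handling:

// Kick off the async write for one partition and wait for it to complete.
ResultSetFuture future = getPutPartitionResultSetFuture(tableRef, partition, transactionType, ttl);
future.getUninterruptibly();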
Use of com.datastax.driver.core.BatchStatement in project java-driver by DataStax.
Class DowngradingRetry, method write:
/**
* Inserts data, retrying if necessary with a downgraded CL.
*
* @param cl the consistency level to apply.
* @param retryCount the current retry count.
* @throws DriverException if the current consistency level cannot be downgraded.
*/
private void write(ConsistencyLevel cl, int retryCount) {
    System.out.printf("Writing at %s (retry count: %d)%n", cl, retryCount);
    BatchStatement batch = new BatchStatement(UNLOGGED);
    batch.add(new SimpleStatement(
            "INSERT INTO downgrading.sensor_data "
                    + "(sensor_id, date, timestamp, value) "
                    + "VALUES ("
                    + "756716f7-2e54-4715-9f00-91dcbea6cf50,"
                    + "'2018-02-26',"
                    + "'2018-02-26T13:53:46.345+01:00',"
                    + "2.34)"));
    batch.add(new SimpleStatement(
            "INSERT INTO downgrading.sensor_data "
                    + "(sensor_id, date, timestamp, value) "
                    + "VALUES ("
                    + "756716f7-2e54-4715-9f00-91dcbea6cf50,"
                    + "'2018-02-26',"
                    + "'2018-02-26T13:54:27.488+01:00',"
                    + "2.47)"));
    batch.add(new SimpleStatement(
            "INSERT INTO downgrading.sensor_data "
                    + "(sensor_id, date, timestamp, value) "
                    + "VALUES ("
                    + "756716f7-2e54-4715-9f00-91dcbea6cf50,"
                    + "'2018-02-26',"
                    + "'2018-02-26T13:56:33.739+01:00',"
                    + "2.52)"));
    batch.setConsistencyLevel(cl);
    try {
        session.execute(batch);
        System.out.println("Write succeeded at " + cl);
    } catch (DriverException e) {
        if (retryCount == maxRetries) {
            throw e;
        }
        e = unwrapNoHostAvailableException(e);
        System.out.println("Write failed: " + e);
        if (e instanceof UnavailableException) {
            // With an UnavailableException, we know that the write wasn't even attempted.
            // Downgrade to the number of replicas reported alive and retry.
            int aliveReplicas = ((UnavailableException) e).getAliveReplicas();
            ConsistencyLevel downgraded = downgrade(cl, aliveReplicas, e);
            write(downgraded, retryCount + 1);
        } else if (e instanceof WriteTimeoutException) {
            WriteType writeType = ((WriteTimeoutException) e).getWriteType();
            int acknowledgements = ((WriteTimeoutException) e).getReceivedAcknowledgements();
            switch (writeType) {
                case SIMPLE:
                case BATCH:
                    // For simple statements and logged batches, one acknowledgement is
                    // enough: the write will eventually be applied to all replicas.
                    // With zero acknowledgements, it is unlikely that
                    // a retry would ever succeed.
                    if (acknowledgements == 0) {
                        throw e;
                    }
                    break;
                case UNLOGGED_BATCH:
                    // For unlogged batches, the write might have been persisted only partially,
                    // so we can't simply ignore the exception: instead, we need to retry with
                    // a consistency level equal to the number of acknowledged writes.
                    ConsistencyLevel downgraded = downgrade(cl, acknowledgements, e);
                    write(downgraded, retryCount + 1);
                    break;
                case BATCH_LOG:
                    // Rare edge case: the peers that were chosen by the coordinator
                    // to receive the distributed batch log failed to respond.
                    // Simply retry with the same consistency level.
                    write(cl, retryCount + 1);
                    break;
                default:
                    // Other write types are uncommon and should not be retried.
                    throw e;
            }
        } else {
            // Unexpected error: just retry with the same consistency level
            // and hope to talk to a healthier coordinator.
            write(cl, retryCount + 1);
        }
    }
}
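The example calls a downgrade helper that is not shown in this excerpt. A plausible sketch, assuming it maps the number of alive (or acknowledging) replicas to the strongest consistency level they can satisfy and rethrows the original exception when none remain; the method body below is an assumption, not necessarily the project's actual code:

// Hypothetical reconstruction of the downgrade helper used above: pick the
// strongest consistency level that the surviving replicas can still satisfy,
// and give up when not even one replica is available.
private ConsistencyLevel downgrade(ConsistencyLevel cl, int aliveReplicas, DriverException e) {
    if (aliveReplicas >= 3) {
        return ConsistencyLevel.THREE;
    }
    if (aliveReplicas == 2) {
        return ConsistencyLevel.TWO;
    }
    if (aliveReplicas == 1) {
        return ConsistencyLevel.ONE;
    }
    throw e;
}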