use of com.datastax.oss.driver.api.core.ConsistencyLevel in project java-driver by datastax.
the class DowngradingRetry method read.
/**
* Queries data, retrying if necessary with a downgraded CL.
*
* @param cl the consistency level to apply.
* @param retryCount the current retry count.
* @throws DriverException if the current consistency level cannot be downgraded.
*/
private ResultSet read(ConsistencyLevel cl, int retryCount) {
System.out.printf("Reading at %s (retry count: %d)%n", cl, retryCount);
Statement stmt =
    SimpleStatement.newInstance(
            "SELECT sensor_id, date, timestamp, value "
                + "FROM downgrading.sensor_data "
                + "WHERE "
                + "sensor_id = 756716f7-2e54-4715-9f00-91dcbea6cf50 AND "
                + "date = '2018-02-26' AND "
                + "timestamp > '2018-02-26+01:00'")
        .setConsistencyLevel(cl);
try {
ResultSet rows = session.execute(stmt);
System.out.println("Read succeeded at " + cl);
return rows;
} catch (DriverException e) {
if (retryCount == maxRetries) {
throw e;
}
e = unwrapAllNodesFailedException(e);
System.out.println("Read failed: " + e);
if (e instanceof UnavailableException) {
// Downgrade to the number of replicas reported alive and retry.
int aliveReplicas = ((UnavailableException) e).getAlive();
ConsistencyLevel downgraded = downgrade(cl, aliveReplicas, e);
return read(downgraded, retryCount + 1);
} else if (e instanceof ReadTimeoutException) {
ReadTimeoutException readTimeout = (ReadTimeoutException) e;
int received = readTimeout.getReceived();
int required = readTimeout.getBlockFor();
// If fewer replicas responded than required by the consistency level, retry with a
// downgraded consistency level
// equal to the number of received acknowledgements.
if (received < required) {
ConsistencyLevel downgraded = downgrade(cl, received, e);
return read(downgraded, retryCount + 1);
}
// If enough replicas replied but the data itself was not retrieved, the replica that was
// queried for data did not respond in time; retry with the same consistency level,
// hoping the coordinator will pick another replica to query
// and get the data back.
if (!readTimeout.wasDataPresent()) {
return read(cl, retryCount + 1);
}
// Otherwise, abort since the read timeout is unlikely to be solved by a retry.
throw e;
} else {
// Unexpected error: just retry with the same consistency level
// and hope to talk to a healthier coordinator.
return read(cl, retryCount + 1);
}
}
}
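Both the read and write snippets delegate to a downgrade helper that is not shown here. A minimal sketch, assuming the helper simply picks the strongest consistency level that the reported number of replicas can still satisfy and rethrows the original exception when no downgrade is possible (an illustration, not the project's actual implementation):

import com.datastax.oss.driver.api.core.ConsistencyLevel;
import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
import com.datastax.oss.driver.api.core.DriverException;

private ConsistencyLevel downgrade(ConsistencyLevel cl, int replicas, DriverException original) {
  // Pick the strongest level that the given number of replicas can still satisfy.
  ConsistencyLevel downgraded;
  if (replicas >= 3) {
    downgraded = DefaultConsistencyLevel.THREE;
  } else if (replicas == 2) {
    downgraded = DefaultConsistencyLevel.TWO;
  } else if (replicas == 1) {
    downgraded = DefaultConsistencyLevel.ONE;
  } else {
    // No replica can satisfy any level: rethrow the original error, matching the
    // "@throws DriverException if the current consistency level cannot be downgraded" contract.
    throw original;
  }
  System.out.printf("Downgrading %s to %s%n", cl, downgraded);
  return downgraded;
}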
use of com.datastax.oss.driver.api.core.ConsistencyLevel in project java-driver by datastax.
the class BasicLoadBalancingPolicy method maybeAddDcFailover.
@NonNull
protected Queue<Node> maybeAddDcFailover(@Nullable Request request, @NonNull Queue<Node> local) {
if (maxNodesPerRemoteDc <= 0 || localDc == null) {
return local;
}
if (!allowDcFailoverForLocalCl && request instanceof Statement) {
Statement<?> statement = (Statement<?>) request;
ConsistencyLevel consistency = statement.getConsistencyLevel();
if (consistency == null) {
consistency = defaultConsistencyLevel;
}
if (consistency.isDcLocal()) {
return local;
}
}
QueryPlan remote = new LazyQueryPlan() {
@Override
protected Object[] computeNodes() {
Object[] remoteNodes =
    liveNodes.dcs().stream()
        .filter(Predicates.not(Predicates.equalTo(localDc)))
        .flatMap(dc -> liveNodes.dc(dc).stream().limit(maxNodesPerRemoteDc))
        .toArray();
int remoteNodesLength = remoteNodes.length;
if (remoteNodesLength == 0) {
return EMPTY_NODES;
}
shuffleHead(remoteNodes, remoteNodesLength);
return remoteNodes;
}
};
return new CompositeQueryPlan(local, remote);
}
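The failover branch above only activates when maxNodesPerRemoteDc is positive, so it has to be enabled through the load-balancing configuration. A sketch of a programmatic setup, assuming the dc-failover options exposed by DefaultDriverOption in recent 4.x driver versions (option names and the local DC name "dc1" are illustrative; verify them against your driver version):

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

// Allow up to 2 nodes per remote DC in query plans, but never for LOCAL_* consistency levels.
DriverConfigLoader loader =
    DriverConfigLoader.programmaticBuilder()
        .withInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, 2)
        .withBoolean(
            DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, false)
        .build();

try (CqlSession session =
    CqlSession.builder()
        .withConfigLoader(loader)
        .withLocalDatacenter("dc1") // hypothetical local DC name
        .build()) {
  // Query plans for non-local consistency levels may now append remote-DC nodes.
}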
use of com.datastax.oss.driver.api.core.ConsistencyLevel in project java-driver by datastax.
the class DowngradingRetry method write.
/**
* Inserts data, retrying if necessary with a downgraded CL.
*
* @param cl the consistency level to apply.
* @param retryCount the current retry count.
* @throws DriverException if the current consistency level cannot be downgraded.
*/
private void write(ConsistencyLevel cl, int retryCount) {
System.out.printf("Writing at %s (retry count: %d)%n", cl, retryCount);
BatchStatement batch =
    BatchStatement.newInstance(UNLOGGED)
        .add(
            SimpleStatement.newInstance(
                "INSERT INTO downgrading.sensor_data "
                    + "(sensor_id, date, timestamp, value) "
                    + "VALUES ("
                    + "756716f7-2e54-4715-9f00-91dcbea6cf50,"
                    + "'2018-02-26',"
                    + "'2018-02-26T13:53:46.345+01:00',"
                    + "2.34)"))
        .add(
            SimpleStatement.newInstance(
                "INSERT INTO downgrading.sensor_data "
                    + "(sensor_id, date, timestamp, value) "
                    + "VALUES ("
                    + "756716f7-2e54-4715-9f00-91dcbea6cf50,"
                    + "'2018-02-26',"
                    + "'2018-02-26T13:54:27.488+01:00',"
                    + "2.47)"))
        .add(
            SimpleStatement.newInstance(
                "INSERT INTO downgrading.sensor_data "
                    + "(sensor_id, date, timestamp, value) "
                    + "VALUES ("
                    + "756716f7-2e54-4715-9f00-91dcbea6cf50,"
                    + "'2018-02-26',"
                    + "'2018-02-26T13:56:33.739+01:00',"
                    + "2.52)"))
        .setConsistencyLevel(cl);
try {
session.execute(batch);
System.out.println("Write succeeded at " + cl);
} catch (DriverException e) {
if (retryCount == maxRetries) {
throw e;
}
e = unwrapAllNodesFailedException(e);
System.out.println("Write failed: " + e);
if (e instanceof UnavailableException) {
// With an UnavailableException, we know that the write wasn't even attempted.
// Downgrade to the number of replicas reported alive and retry.
int aliveReplicas = ((UnavailableException) e).getAlive();
ConsistencyLevel downgraded = downgrade(cl, aliveReplicas, e);
write(downgraded, retryCount + 1);
} else if (e instanceof WriteTimeoutException) {
DefaultWriteType writeType = (DefaultWriteType) ((WriteTimeoutException) e).getWriteType();
int acknowledgements = ((WriteTimeoutException) e).getReceived();
switch(writeType) {
case SIMPLE:
case BATCH:
// For simple statements and logged batches, if at least one replica acknowledged the
// write, it will eventually be propagated to the remaining replicas and the timeout
// can be ignored; but if no acknowledgement was received at all, it is unlikely that
// a retry would ever succeed.
if (acknowledgements == 0) {
throw e;
}
break;
case UNLOGGED_BATCH:
// For unlogged batches, the write might have been persisted only partially,
// so we can't simply ignore the exception: instead, we need to retry with
// consistency level equal to the number of acknowledged writes.
ConsistencyLevel downgraded = downgrade(cl, acknowledgements, e);
write(downgraded, retryCount + 1);
break;
case BATCH_LOG:
// Rare edge case: the peers that were chosen by the coordinator
// to receive the distributed batch log failed to respond.
// Simply retry with same consistency level.
write(cl, retryCount + 1);
break;
default:
// Other write types are uncommon and should not be retried.
throw e;
}
} else {
// Unexpected error: just retry with same consistency level
// and hope to talk to a healthier coordinator.
write(cl, retryCount + 1);
}
}
}
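Both methods also rely on an unwrapAllNodesFailedException helper that is not part of this snippet. A minimal sketch, assuming its purpose is to surface the first node-level DriverException (such as an UnavailableException) wrapped inside an AllNodesFailedException, and assuming a driver version where getAllErrors is available:

import com.datastax.oss.driver.api.core.AllNodesFailedException;
import com.datastax.oss.driver.api.core.DriverException;
import java.util.List;

private static DriverException unwrapAllNodesFailedException(DriverException e) {
  if (e instanceof AllNodesFailedException) {
    // Surface the first per-node DriverException so the caller can pattern-match on it;
    // otherwise keep the original exception unchanged.
    for (List<Throwable> nodeErrors : ((AllNodesFailedException) e).getAllErrors().values()) {
      for (Throwable nodeError : nodeErrors) {
        if (nodeError instanceof DriverException) {
          return (DriverException) nodeError;
        }
      }
    }
  }
  return e;
}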
use of com.datastax.oss.driver.api.core.ConsistencyLevel in project java-driver by datastax.
the class DseConversions method toContinuousPagingMessage.
public static Message toContinuousPagingMessage(Statement<?> statement, DriverExecutionProfile config, InternalDriverContext context) {
ConsistencyLevelRegistry consistencyLevelRegistry = context.getConsistencyLevelRegistry();
ConsistencyLevel consistency = statement.getConsistencyLevel();
int consistencyCode =
    (consistency == null)
        ? consistencyLevelRegistry.nameToCode(config.getString(DefaultDriverOption.REQUEST_CONSISTENCY))
        : consistency.getProtocolCode();
int pageSize = config.getInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE);
boolean pageSizeInBytes = config.getBoolean(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE_BYTES);
int maxPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES);
int maxPagesPerSecond = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND);
int maxEnqueuedPages = config.getInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES);
ContinuousPagingOptions options = new ContinuousPagingOptions(maxPages, maxPagesPerSecond, maxEnqueuedPages);
ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel();
int serialConsistencyCode =
    (serialConsistency == null)
        ? consistencyLevelRegistry.nameToCode(config.getString(DefaultDriverOption.REQUEST_SERIAL_CONSISTENCY))
        : serialConsistency.getProtocolCode();
long timestamp = statement.getQueryTimestamp();
if (timestamp == Statement.NO_DEFAULT_TIMESTAMP) {
timestamp = context.getTimestampGenerator().next();
}
CodecRegistry codecRegistry = context.getCodecRegistry();
ProtocolVersion protocolVersion = context.getProtocolVersion();
ProtocolVersionRegistry protocolVersionRegistry = context.getProtocolVersionRegistry();
CqlIdentifier keyspace = statement.getKeyspace();
if (statement instanceof SimpleStatement) {
SimpleStatement simpleStatement = (SimpleStatement) statement;
List<Object> positionalValues = simpleStatement.getPositionalValues();
Map<CqlIdentifier, Object> namedValues = simpleStatement.getNamedValues();
if (!positionalValues.isEmpty() && !namedValues.isEmpty()) {
throw new IllegalArgumentException("Can't have both positional and named values in a statement.");
}
if (keyspace != null && !protocolVersionRegistry.supports(protocolVersion, DefaultProtocolFeature.PER_REQUEST_KEYSPACE)) {
throw new IllegalArgumentException("Can't use per-request keyspace with protocol " + protocolVersion);
}
DseQueryOptions queryOptions =
    new DseQueryOptions(
        consistencyCode,
        Conversions.encode(positionalValues, codecRegistry, protocolVersion),
        Conversions.encode(namedValues, codecRegistry, protocolVersion),
        false,
        pageSize,
        statement.getPagingState(),
        serialConsistencyCode,
        timestamp,
        (keyspace == null) ? null : keyspace.asInternal(),
        pageSizeInBytes,
        options);
return new Query(simpleStatement.getQuery(), queryOptions);
} else if (statement instanceof BoundStatement) {
BoundStatement boundStatement = (BoundStatement) statement;
if (!protocolVersionRegistry.supports(protocolVersion, DefaultProtocolFeature.UNSET_BOUND_VALUES)) {
Conversions.ensureAllSet(boundStatement);
}
boolean skipMetadata = boundStatement.getPreparedStatement().getResultSetDefinitions().size() > 0;
DseQueryOptions queryOptions =
    new DseQueryOptions(
        consistencyCode,
        boundStatement.getValues(),
        Collections.emptyMap(),
        skipMetadata,
        pageSize,
        statement.getPagingState(),
        serialConsistencyCode,
        timestamp,
        null,
        pageSizeInBytes,
        options);
PreparedStatement preparedStatement = boundStatement.getPreparedStatement();
ByteBuffer id = preparedStatement.getId();
ByteBuffer resultMetadataId = preparedStatement.getResultMetadataId();
return new Execute(Bytes.getArray(id), (resultMetadataId == null) ? null : Bytes.getArray(resultMetadataId), queryOptions);
} else {
throw new IllegalArgumentException("Unsupported statement type: " + statement.getClass().getName());
}
}
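The paging parameters read above all come from the execution profile, so continuous paging is normally tuned through configuration and then driven from application code. A sketch under those assumptions, using the DseDriverOption constants shown above and the executeContinuously method of the unified 4.x driver (a DSE-only feature; the values and query are illustrative):

import com.datastax.dse.driver.api.core.config.DseDriverOption;
import com.datastax.dse.driver.api.core.cql.continuous.ContinuousResultSet;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

DriverConfigLoader loader =
    DriverConfigLoader.programmaticBuilder()
        .withInt(DseDriverOption.CONTINUOUS_PAGING_PAGE_SIZE, 5000)
        .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES, 0) // 0 is assumed to mean "no page limit"
        .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_PAGES_PER_SECOND, 0)
        .withInt(DseDriverOption.CONTINUOUS_PAGING_MAX_ENQUEUED_PAGES, 4)
        .build();

try (CqlSession session = CqlSession.builder().withConfigLoader(loader).build()) {
  // executeContinuously streams pages from the server as they are produced.
  ContinuousResultSet rows =
      session.executeContinuously(
          SimpleStatement.newInstance("SELECT * FROM downgrading.sensor_data"));
  for (Row row : rows) {
    // process row
  }
}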
use of com.datastax.oss.driver.api.core.ConsistencyLevel in project java-driver by datastax.
the class GraphConversions method createCustomPayload.
public static Map<String, ByteBuffer> createCustomPayload(
    GraphStatement<?> statement,
    GraphProtocol subProtocol,
    DriverExecutionProfile config,
    InternalDriverContext context,
    GraphBinaryModule graphBinaryModule) {
ProtocolVersion protocolVersion = context.getProtocolVersion();
NullAllowingImmutableMap.Builder<String, ByteBuffer> payload = NullAllowingImmutableMap.builder();
Map<String, ByteBuffer> statementOptions = statement.getCustomPayload();
payload.putAll(statementOptions);
final String graphLanguage;
// Don't override anything that's already provided at the statement level
if (!statementOptions.containsKey(GRAPH_LANG_OPTION_KEY)) {
graphLanguage = statement instanceof ScriptGraphStatement ? LANGUAGE_GROOVY : LANGUAGE_BYTECODE;
payload.put(GRAPH_LANG_OPTION_KEY, TypeCodecs.TEXT.encode(graphLanguage, protocolVersion));
} else {
graphLanguage = TypeCodecs.TEXT.decode(statementOptions.get(GRAPH_LANG_OPTION_KEY), protocolVersion);
Preconditions.checkNotNull(graphLanguage, "A null value was set for the graph-language custom payload key.");
}
if (!isSystemQuery(statement, config)) {
if (!statementOptions.containsKey(GRAPH_NAME_OPTION_KEY)) {
String graphName = statement.getGraphName();
if (graphName == null) {
graphName = config.getString(DseDriverOption.GRAPH_NAME, null);
}
if (graphName != null) {
payload.put(GRAPH_NAME_OPTION_KEY, TypeCodecs.TEXT.encode(graphName, protocolVersion));
}
}
if (!statementOptions.containsKey(GRAPH_SOURCE_OPTION_KEY)) {
String traversalSource = statement.getTraversalSource();
if (traversalSource == null) {
traversalSource = config.getString(DseDriverOption.GRAPH_TRAVERSAL_SOURCE, null);
}
if (traversalSource != null) {
payload.put(GRAPH_SOURCE_OPTION_KEY, TypeCodecs.TEXT.encode(traversalSource, protocolVersion));
}
}
}
// The payload allows null entry values, so do a direct get here and check for null.
final ByteBuffer payloadInitialProtocol = statementOptions.get(GRAPH_RESULTS_OPTION_KEY);
if (payloadInitialProtocol == null) {
Preconditions.checkNotNull(subProtocol);
payload.put(GRAPH_RESULTS_OPTION_KEY, TypeCodecs.TEXT.encode(subProtocol.toInternalCode(), protocolVersion));
} else {
subProtocol = GraphProtocol.fromString(TypeCodecs.TEXT.decode(payloadInitialProtocol, protocolVersion));
}
if (subProtocol.isGraphBinary() && graphLanguage.equals(LANGUAGE_BYTECODE)) {
Object bytecodeQuery = bytecodeToSerialize(statement);
try {
Buffer bytecodeByteBuf = graphBinaryModule.serialize(bytecodeQuery);
payload.put(GRAPH_BINARY_QUERY_OPTION_KEY, bytecodeByteBuf.nioBuffer());
bytecodeByteBuf.release();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
if (!statementOptions.containsKey(GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY)) {
ConsistencyLevel readCl = statement.getReadConsistencyLevel();
String readClString = readCl != null ? readCl.name() : config.getString(DseDriverOption.GRAPH_READ_CONSISTENCY_LEVEL, null);
if (readClString != null) {
payload.put(GRAPH_READ_CONSISTENCY_LEVEL_OPTION_KEY, TypeCodecs.TEXT.encode(readClString, protocolVersion));
}
}
if (!statementOptions.containsKey(GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY)) {
ConsistencyLevel writeCl = statement.getWriteConsistencyLevel();
String writeClString = writeCl != null ? writeCl.name() : config.getString(DseDriverOption.GRAPH_WRITE_CONSISTENCY_LEVEL, null);
if (writeClString != null) {
payload.put(GRAPH_WRITE_CONSISTENCY_LEVEL_OPTION_KEY, TypeCodecs.TEXT.encode(writeClString, protocolVersion));
}
}
if (!statementOptions.containsKey(GRAPH_TIMEOUT_OPTION_KEY)) {
Duration timeout = statement.getTimeout();
if (timeout == null) {
timeout = config.getDuration(DseDriverOption.GRAPH_TIMEOUT, Duration.ZERO);
}
if (timeout != null && !timeout.isZero()) {
payload.put(GRAPH_TIMEOUT_OPTION_KEY, TypeCodecs.BIGINT.encode(timeout.toMillis(), protocolVersion));
}
}
return payload.build();
}
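createCustomPayload only fills in keys that the statement's custom payload does not already carry, so these options are usually supplied through the statement itself (or the execution profile). A sketch of setting them on a script statement, assuming the fluent setters exposed by GraphStatement in the 4.x driver; the graph name and query are illustrative:

import com.datastax.dse.driver.api.core.graph.GraphResultSet;
import com.datastax.dse.driver.api.core.graph.ScriptGraphStatement;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
import java.time.Duration;

ScriptGraphStatement statement =
    ScriptGraphStatement.newInstance("g.V().limit(10)")
        .setGraphName("my_graph") // hypothetical graph name
        .setTraversalSource("g")
        .setReadConsistencyLevel(DefaultConsistencyLevel.LOCAL_QUORUM)
        .setWriteConsistencyLevel(DefaultConsistencyLevel.LOCAL_QUORUM)
        .setTimeout(Duration.ofSeconds(10));

try (CqlSession session = CqlSession.builder().build()) {
  // Each of these settings ends up in the custom payload built by createCustomPayload,
  // unless the same key was already set directly on the statement's custom payload.
  GraphResultSet result = session.execute(statement);
  result.forEach(System.out::println);
}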