Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project java-driver by datastax.
The class MapperReconnectionTest, method should_not_keep_failed_future_in_query_cache:
/**
 * Ensures that when the driver loses connectivity and a mapper query
 * preparation is attempted in the meantime, the failed future is not
 * kept in the cache, so that the same query preparation can be
 * reattempted once connectivity comes back.
 *
 * @jira_ticket JAVA-1283
 * @test_category object_mapper
 */
@Test(groups = "long")
public void should_not_keep_failed_future_in_query_cache() throws Exception {
    MappingManager manager = new MappingManager(session());
    Mapper<User> m = manager.mapper(User.class);
    User u1 = new User("Paul", "paul@gmail.com");
    m.save(u1);
    ccm().stop(1);
    ccm().waitForDown(1);
    waitForDown(ipOfNode(1), cluster());
    try {
        m.get(u1.getUserId());
        fail("Should have thrown NoHostAvailableException");
    } catch (NoHostAvailableException e) {
        // expected: the only node is down, so the query preparation must fail
    }
    ccm().start(1);
    ccm().waitForUp(1);
    waitForUp(ipOfNode(1), cluster());
    try {
        m.get(u1.getUserId());
    } catch (NoHostAvailableException e) {
        fail("Should not have thrown NoHostAvailableException");
    }
}
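What this test guards against (JAVA-1283) is a cache that retains a failed preparation future forever. A minimal, hypothetical sketch of the eviction pattern, using plain java.util.concurrent types rather than the mapper's actual internals (QueryPrepareCache and its signature are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

final class QueryPrepareCache<V> {

    private final ConcurrentMap<String, CompletableFuture<V>> cache = new ConcurrentHashMap<>();

    // Cache preparation futures, but evict any future that completes
    // exceptionally so the next caller re-attempts the preparation.
    CompletableFuture<V> prepare(String query, Function<String, CompletableFuture<V>> loader) {
        CompletableFuture<V> future = cache.computeIfAbsent(query, loader);
        future.whenComplete((value, error) -> {
            if (error != null) {
                // remove(key, value) only evicts this exact failed future, so a
                // fresh retry registered by another thread is never evicted.
                cache.remove(query, future);
            }
        });
        return future;
    }
}

With this shape, a NoHostAvailableException thrown while the cluster is down fails the future and immediately evicts it, which is exactly the behavior the second half of the test asserts.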
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project cassandra by apache.
The class BulkLoaderTest, method testBulkLoader_WithArgs1:
@Test
public void testBulkLoader_WithArgs1() throws Exception {
    ToolResult tool = ToolRunner.invokeClass(BulkLoader.class,
            "-d", "127.9.9.1",
            "--port", "9042",
            OfflineToolUtils.sstableDirName("legacy_sstables", "legacy_ma_simple"));
    assertEquals(-1, tool.getExitCode());
    if (!(tool.getException().getCause() instanceof BulkLoadException))
        throw tool.getException();
    if (!(tool.getException().getCause().getCause() instanceof NoHostAvailableException))
        throw tool.getException();
    assertNoUnexpectedThreadsStarted(new String[] {
            "ObjectCleanerThread",
            "globalEventExecutor-[1-9]-[1-9]",
            "Shutdown-checker",
            "cluster[0-9]-connection-reaper-[0-9]" });
    assertSchemaNotLoaded();
    assertCLSMNotLoaded();
    assertSystemKSNotLoaded();
    assertKeyspaceNotLoaded();
    assertServerNotLoaded();
}
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project cassandra by apache.
The class BulkLoaderTest, method testBulkLoader_WithArgs2:
@Test
public void testBulkLoader_WithArgs2() throws Exception {
    ToolResult tool = ToolRunner.invokeClass(BulkLoader.class,
            "-d", "127.9.9.1:9042",
            "--port", "9041",
            OfflineToolUtils.sstableDirName("legacy_sstables", "legacy_ma_simple"));
    assertEquals(-1, tool.getExitCode());
    if (!(tool.getException().getCause() instanceof BulkLoadException))
        throw tool.getException();
    if (!(tool.getException().getCause().getCause() instanceof NoHostAvailableException))
        throw tool.getException();
    assertNoUnexpectedThreadsStarted(new String[] {
            "ObjectCleanerThread",
            "globalEventExecutor-[1-9]-[1-9]",
            "Shutdown-checker",
            "cluster[0-9]-connection-reaper-[0-9]" });
    assertSchemaNotLoaded();
    assertCLSMNotLoaded();
    assertSystemKSNotLoaded();
    assertKeyspaceNotLoaded();
    assertServerNotLoaded();
}
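Both tests pin down the same two-level cause chain: the tool's top-level exception wraps a BulkLoadException, which in turn wraps the NoHostAvailableException raised because 127.9.9.1 is unreachable. A hypothetical helper could make that assertion read more directly (assertCauseChain is not part of the Cassandra test utilities; it assumes JUnit's static asserts are imported):

private static void assertCauseChain(Throwable root, Class<?>... expectedCauses) {
    Throwable current = root;
    for (Class<?> expected : expectedCauses) {
        current = current.getCause();
        assertNotNull("Cause chain ended before " + expected.getSimpleName(), current);
        assertTrue("Expected " + expected.getSimpleName() + " but found " + current.getClass().getName(),
                expected.isInstance(current));
    }
}

The two instanceof checks above would then collapse to a single call:

assertCauseChain(tool.getException(), BulkLoadException.class, NoHostAvailableException.class);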
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project beam by apache.
The class CassandraIOTest, method buildCluster:
private static Cluster buildCluster(CassandraEmbeddedServerBuilder builder) {
    int tried = 0;
    int delay = 5000;
    Exception exception = null;
    while (tried < 5) {
        try {
            return builder.buildNativeCluster();
        } catch (NoHostAvailableException e) {
            // Keep the first failure as the primary exception and attach the
            // later ones as suppressed, so nothing is lost if all attempts fail.
            if (exception == null) {
                exception = e;
            } else {
                exception.addSuppressed(e);
            }
            tried++;
            try {
                Thread.sleep(delay);
            } catch (InterruptedException e1) {
                Thread thread = Thread.currentThread();
                thread.interrupt();
                throw new RuntimeException(String.format("Thread %s was interrupted", thread.getName()), e1);
            }
        }
    }
    throw new RuntimeException(
            String.format("Unable to create embedded Cassandra cluster: tried %d times with %d ms delay", tried, delay),
            exception);
}
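The pattern here, retrying while accumulating earlier failures as suppressed exceptions, generalizes beyond cluster startup. A hypothetical, self-contained variant with exponential backoff (the retry helper, its name, and its parameters are illustrative, not Beam API):

import java.util.concurrent.Callable;

// Assumes maxAttempts >= 1, so `first` is always non-null when rethrown.
static <T> T retry(Callable<T> action, int maxAttempts, long initialDelayMillis) throws Exception {
    Exception first = null;
    long delay = initialDelayMillis;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            return action.call();
        } catch (Exception e) {
            if (first == null) {
                first = e;
            } else {
                first.addSuppressed(e);
            }
            if (attempt < maxAttempts) {
                Thread.sleep(delay);
                delay *= 2; // back off: e.g. 5s, 10s, 20s, ...
            }
        }
    }
    throw first;
}

A call such as retry(builder::buildNativeCluster, 5, 5000) would then reproduce the behavior of buildCluster, with growing pauses between attempts.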
Use of com.datastax.driver.core.exceptions.NoHostAvailableException in project nifi by apache.
The class QueryCassandra, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();
        // If all incoming connections are self-loops we can continue; otherwise
        // we should only run when a FlowFile is available.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }
    final ComponentLog logger = getLogger();
    final String selectQuery = context.getProperty(CQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess).getValue();
    final long queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions(fileToProcess).asTimePeriod(TimeUnit.MILLISECONDS);
    final String outputFormat = context.getProperty(OUTPUT_FORMAT).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(fileToProcess).getValue());
    final StopWatch stopWatch = new StopWatch(true);
    if (fileToProcess == null) {
        fileToProcess = session.create();
    }
    try {
        // The driver documentation recommends keeping the session open for the whole
        // lifetime of the processor and states that it is thread-safe; this is why
        // connectionSession is not in a try-with-resources.
        final Session connectionSession = cassandraSession.get();
        final ResultSetFuture queryFuture = connectionSession.executeAsync(selectQuery);
        final AtomicLong nrOfRows = new AtomicLong(0L);
        fileToProcess = session.write(fileToProcess, new OutputStreamCallback() {

            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing CQL query {}", new Object[] { selectQuery });
                    final ResultSet resultSet;
                    if (queryTimeout > 0) {
                        resultSet = queryFuture.getUninterruptibly(queryTimeout, TimeUnit.MILLISECONDS);
                        if (AVRO_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToAvroStream(resultSet, out, queryTimeout, TimeUnit.MILLISECONDS));
                        } else if (JSON_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToJsonStream(resultSet, out, charset, queryTimeout, TimeUnit.MILLISECONDS));
                        }
                    } else {
                        resultSet = queryFuture.getUninterruptibly();
                        if (AVRO_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToAvroStream(resultSet, out, 0, null));
                        } else if (JSON_FORMAT.equals(outputFormat)) {
                            nrOfRows.set(convertToJsonStream(resultSet, out, charset, 0, null));
                        }
                    }
                } catch (final TimeoutException | InterruptedException | ExecutionException e) {
                    throw new ProcessException(e);
                }
            }
        });
        // Record how many rows were selected ...
        fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
        // ... and set mime.type based on the output format.
        fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(),
                JSON_FORMAT.equals(outputFormat) ? "application/json" : "application/avro-binary");
        logger.info("{} contains {} records; transferring to 'success'", new Object[] { fileToProcess, nrOfRows.get() });
        session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(fileToProcess, REL_SUCCESS);
    } catch (final NoHostAvailableException nhae) {
        getLogger().error("No host in the Cassandra cluster can be contacted successfully to execute this query", nhae);
        // Log up to 10 error messages. Otherwise, if a 1000-node cluster was specified but there
        // was no connectivity, a thousand error messages would be logged. We still want the
        // information from Cassandra itself, so cap the limit at 10, format the messages, and
        // omit the stack traces (the logger call above already prints one).
        getLogger().error(nhae.getCustomMessage(10, true, false));
        fileToProcess = session.penalize(fileToProcess);
        session.transfer(fileToProcess, REL_RETRY);
    } catch (final QueryExecutionException qee) {
        logger.error("Cannot execute the query with the requested consistency level successfully", qee);
        fileToProcess = session.penalize(fileToProcess);
        session.transfer(fileToProcess, REL_RETRY);
    } catch (final QueryValidationException qve) {
        if (context.hasIncomingConnection()) {
            logger.error("The CQL query {} is invalid due to a syntax error, authorization issue, or another "
                    + "validation problem; routing {} to failure", new Object[] { selectQuery, fileToProcess }, qve);
            fileToProcess = session.penalize(fileToProcess);
            session.transfer(fileToProcess, REL_FAILURE);
        } else {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("The CQL query {} is invalid due to a syntax error, authorization issue, or another "
                    + "validation problem", new Object[] { selectQuery }, qve);
            session.remove(fileToProcess);
            context.yield();
        }
    } catch (final ProcessException e) {
        if (context.hasIncomingConnection()) {
            logger.error("Unable to execute CQL select query {} for {} due to {}; routing to failure",
                    new Object[] { selectQuery, fileToProcess, e });
            fileToProcess = session.penalize(fileToProcess);
            session.transfer(fileToProcess, REL_FAILURE);
        } else {
            logger.error("Unable to execute CQL select query {} due to {}", new Object[] { selectQuery, e });
            session.remove(fileToProcess);
            context.yield();
        }
    }
}
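For finer control over the NoHostAvailableException handling above, the driver also exposes the raw per-host failures. A hypothetical helper for this processor (logPerHostErrors is not part of QueryCassandra; getErrors() returns a Map<InetSocketAddress, Throwable> in the 3.x driver that NiFi uses here):

import java.net.InetSocketAddress;
import java.util.Map;

private void logPerHostErrors(final NoHostAvailableException nhae) {
    // Equivalent in spirit to getCustomMessage(10, true, false), but with access
    // to the underlying Throwable for each contact point that was tried.
    for (final Map.Entry<InetSocketAddress, Throwable> entry : nhae.getErrors().entrySet()) {
        getLogger().error("Host {} could not be queried: {}",
                new Object[] { entry.getKey(), entry.getValue().toString() });
    }
}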