Usage of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in the Apache Cassandra project: method exec of class FBUtilities.
/**
 * Starts the process described by {@code pb} and blocks until it exits.
 * <p>
 * The process's stdout and stderr streams are always drained-capable and closed,
 * whatever the exit code; previously they were only closed on failure, leaking
 * file descriptors on the success path until GC.
 *
 * @param pb the process to start and wait for
 * @throws IOException on a non-zero exit code, with the command line and the
 *                     process's combined stdout/stderr in the message
 * @throws UncheckedInterruptedException if the waiting thread is interrupted
 */
public static void exec(ProcessBuilder pb) throws IOException {
    Process p = pb.start();
    try {
        int errCode = p.waitFor();
        // Open the streams unconditionally so they are closed even when the
        // command succeeds; on success the bodies are simply never read.
        try (BufferedReader in = new BufferedReader(new InputStreamReader(p.getInputStream()));
             BufferedReader err = new BufferedReader(new InputStreamReader(p.getErrorStream()))) {
            if (errCode != 0) {
                String lineSep = LINE_SEPARATOR.getString();
                StringBuilder sb = new StringBuilder();
                String str;
                // Capture everything the process printed to aid diagnosis.
                while ((str = in.readLine()) != null) sb.append(str).append(lineSep);
                while ((str = err.readLine()) != null) sb.append(str).append(lineSep);
                throw new IOException("Exception while executing the command: " + StringUtils.join(pb.command(), " ") + ", command error Code: " + errCode + ", command output: " + sb.toString());
            }
        }
    } catch (InterruptedException e) {
        // Project convention: surface interruption as an unchecked exception.
        throw new UncheckedInterruptedException(e);
    }
}
Usage of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in the Apache Cassandra project: method inspectThrowable of class JVMStabilityInspector.
/**
 * Inspects {@code t} (and, recursively, its causes) for conditions that make the
 * JVM unstable, killing or rethrowing as appropriate, then hands the throwable
 * to {@code fn} for any additional handling.
 */
public static void inspectThrowable(Throwable t, Consumer<Throwable> fn) throws OutOfMemoryError {
    boolean unstable = false;

    if (t instanceof OutOfMemoryError) {
        if (Boolean.getBoolean("cassandra.printHeapHistogramOnOutOfMemoryError")) {
            // Only one thread prints the histogram; concurrent callers bail out early.
            synchronized (lock) {
                if (printingHeapHistogram)
                    return;
                printingHeapHistogram = true;
            }
            HeapUtils.logHeapHistogram();
        }
        logger.error("OutOfMemory error letting the JVM handle the error:", t);
        StorageService.instance.removeShutdownHook();
        forceHeapSpaceOomMaybe((OutOfMemoryError) t);
        // Rethrow so the JVM applies its configured OOM behavior (CASSANDRA-13006).
        throw (OutOfMemoryError) t;
    }

    if (t instanceof UnrecoverableIllegalStateException)
        unstable = true;

    if (t instanceof InterruptedException)
        throw new UncheckedInterruptedException((InterruptedException) t);

    boolean dieOnDiskFailure = DatabaseDescriptor.getDiskFailurePolicy() == Config.DiskFailurePolicy.die;
    if (dieOnDiskFailure && (t instanceof FSError || t instanceof CorruptSSTableException))
        unstable = true;

    // Check for file handle exhaustion
    if (t instanceof FileNotFoundException || t instanceof FileSystemException || t instanceof SocketException) {
        String message = t.getMessage();
        if (message != null && message.contains("Too many open files"))
            unstable = true;
    }

    if (unstable) {
        if (!StorageService.instance.isDaemonSetupCompleted())
            FileUtils.handleStartupFSError(t);
        killer.killCurrentJVM(t);
    }

    try {
        fn.accept(t);
    } catch (Exception | Error e) {
        logger.warn("Unexpected error while handling unexpected error", e);
    }

    // Walk the cause chain so wrapped fatal errors are not missed.
    if (t.getCause() != null)
        inspectThrowable(t.getCause(), fn);
}
Usage of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in the Apache Cassandra project: method get of class AbstractWriteResponseHandler.
/**
 * Blocks until enough replicas have responded, or the write timeout elapses.
 *
 * @throws WriteTimeoutException if the required responses did not arrive in time
 * @throws WriteFailureException if too many replicas reported failure
 */
public void get() throws WriteTimeoutException, WriteFailureException {
    long timeoutNanos = currentTimeoutNanos();

    boolean signalled;
    try {
        signalled = condition.await(timeoutNanos, NANOSECONDS);
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    }

    if (!signalled) {
        int required = blockFor();
        // Cap the reported ack count below blockFor so a race between the ack
        // and the timeout does not confuse the user (see CASSANDRA-6491).
        int received = Math.min(ackCount(), required - 1);
        throw new WriteTimeoutException(writeType, replicaPlan.consistencyLevel(), received, required);
    }

    if (blockFor() + failures > candidateReplicaCount())
        throw new WriteFailureException(replicaPlan.consistencyLevel(), ackCount(), blockFor(), writeType, failureReasonByEndpoint);
}
Usage of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in the Apache Cassandra project: method execute of class SimpleClient.
/**
 * Executes a batch of requests and returns a map from each request to its response.
 * On protocol V5+ all requests are written in one flush and matched back to their
 * request by stream id; older protocols fall back to sequential execution.
 */
public Map<Message.Request, Message.Response> execute(List<Message.Request> requests) {
    try {
        Map<Message.Request, Message.Response> results = new HashMap<>();

        if (!version.isGreaterOrEqualTo(ProtocolVersion.V5)) {
            // Pre-V5 protocols cannot batch, so issue the requests one by one.
            for (Message.Request request : requests)
                results.put(request, execute(request));
            return results;
        }

        // Tag each request with its index so responses can be matched back up.
        int streamId = 0;
        for (Message.Request request : requests) {
            request.setStreamId(streamId++);
            request.attach(connection);
        }
        lastWriteFuture = channel.writeAndFlush(requests);

        // Shared deadline for the whole batch, not per response.
        long deadline = currentTimeMillis() + TimeUnit.SECONDS.toMillis(TIMEOUT_SECONDS);
        for (int remaining = requests.size(); remaining > 0; remaining--) {
            Message.Response response = responseHandler.responses.poll(deadline - currentTimeMillis(), TimeUnit.MILLISECONDS);
            if (response == null)
                throw new RuntimeException("timeout");
            if (response instanceof ErrorMessage)
                throw new RuntimeException((Throwable) ((ErrorMessage) response).error);
            results.put(requests.get(response.getStreamId()), response);
        }
        return results;
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    }
}
Usage of org.apache.cassandra.utils.concurrent.UncheckedInterruptedException in the Apache Cassandra project: method waitOnFirstFuture of class FBUtilities.
/**
* Only wait for the first future to finish from a list of futures. Will block until at least 1 future finishes.
* @param futures The futures to wait on
* @return future that completed.
*/
/**
 * Blocks until at least one of the given futures has finished and returns it.
 * Repeatedly scans the futures, waiting up to {@code delay} milliseconds on the
 * last one of each pass before scanning again.
 *
 * @param futures the futures to wait on; must be non-empty
 * @param delay   per-pass wait, in milliseconds, applied to the future being polled
 * @return a future that completed
 * @throws IllegalArgumentException if {@code futures} is empty
 */
public static <T, F extends Future<? extends T>> F waitOnFirstFuture(Iterable<? extends F> futures, long delay) {
    for (;;) {
        Iterator<? extends F> it = futures.iterator();
        if (!it.hasNext())
            throw new IllegalArgumentException();
        while (true) {
            F future = it.next();
            boolean completed = future.isDone();
            // Skip ahead unless this future is done or it is the last in the pass.
            if (!completed && it.hasNext())
                continue;
            try {
                future.get(delay, TimeUnit.MILLISECONDS);
                return future;
            } catch (InterruptedException e) {
                throw new UncheckedInterruptedException(e);
            } catch (ExecutionException e) {
                // A failed future still counts as "finished"? No: surface the failure.
                throw new RuntimeException(e);
            } catch (TimeoutException e) {
                // isDone() claimed completion but get() timed out: return anyway to
                // prevent infinite loops on bad implementations (not encountered).
                if (completed)
                    return future;
                break; // restart the scan from the beginning
            }
        }
    }
}
Aggregations