Use of java.util.concurrent.CancellationException in project cassandra by apache.
The class StreamTransferTaskTest, method testScheduleTimeout.
@Test
public void testScheduleTimeout() throws Exception {
InetAddress peer = FBUtilities.getBroadcastAddress();
StreamSession session = new StreamSession(peer, peer, null, 0, true, false, null);
ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD);
// create two sstables
for (int i = 0; i < 2; i++) {
SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, i, 1);
cfs.forceBlockingFlush();
}
// create streaming task that streams those two sstables
StreamTransferTask task = new StreamTransferTask(session, cfs.metadata.id);
for (SSTableReader sstable : cfs.getLiveSSTables()) {
List<Range<Token>> ranges = new ArrayList<>();
ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
task.addTransferFile(sstable.selfRef(), 1, sstable.getPositionsForRanges(ranges), 0);
}
assertEquals(2, task.getTotalNumberOfFiles());
// a zero-delay timeout on the first file fires immediately; get() returns once the timeout task has run
Future<?> f = task.scheduleTimeout(0, 0, TimeUnit.NANOSECONDS);
f.get();
// if the file completes before its timeout fires, the pending timeout task is cancelled
f = task.scheduleTimeout(1, 10, TimeUnit.MILLISECONDS);
task.complete(1);
try {
f.get();
Assert.fail("expected the timeout future to be cancelled");
} catch (CancellationException expected) {
// completing the file cancelled its pending timeout
}
assertEquals(StreamSession.State.WAIT_COMPLETE, session.state());
// once all files are complete, no timeout task can be scheduled.
assertNull(task.scheduleTimeout(1, 1, TimeUnit.SECONDS));
}
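The mechanism exercised by this test is generic: get() on a cancelled future throws CancellationException. A minimal, self-contained sketch of the same pattern with a plain ScheduledExecutorService (no Cassandra APIs involved; names are illustrative):

import java.util.concurrent.CancellationException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class TimeoutCancelSketch {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // schedule a timeout action to fire in 10 seconds
        ScheduledFuture<?> timeout = scheduler.schedule(
                () -> System.out.println("timed out"), 10, TimeUnit.SECONDS);
        // the work finishes first, so the pending timeout is cancelled
        timeout.cancel(false);
        try {
            timeout.get();
        } catch (CancellationException expected) {
            // as in the test above, get() on a cancelled future throws
            System.out.println("timeout was cancelled");
        }
        scheduler.shutdown();
    }
}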
Use of java.util.concurrent.CancellationException in project hadoop by apache.
The class AbstractFuture, method cancellationExceptionWithCause.
private static CancellationException cancellationExceptionWithCause(@Nullable String message, @Nullable Throwable cause) {
CancellationException exception = new CancellationException(message);
exception.initCause(cause);
return exception;
}
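CancellationException provides only a no-arg and a message constructor, so a cause has to be attached after construction; that is the whole point of this helper. A small usage sketch (the helper is restated so the example is self-contained; the call site is illustrative):

import java.util.concurrent.CancellationException;

public class CancellationCauseSketch {
    static CancellationException cancellationExceptionWithCause(String message, Throwable cause) {
        // CancellationException has no (message, cause) constructor,
        // so the cause is attached via initCause (legal exactly once).
        CancellationException exception = new CancellationException(message);
        exception.initCause(cause);
        return exception;
    }

    public static void main(String[] args) {
        CancellationException ex = cancellationExceptionWithCause(
                "Task was cancelled.", new InterruptedException("waiter interrupted"));
        System.out.println(ex.getMessage() + " caused by: " + ex.getCause());
    }
}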
Use of java.util.concurrent.CancellationException in project buck by facebook.
The class AdbHelper, method adbCall.
/**
* Execute an {@link AdbCallable} for all matching devices. This function performs device
* filtering based on three possible arguments:
*
* -e (emulator-only) - only emulators pass the filter
* -d (device-only) - only physical devices pass the filter
* -s (serial) - only the device/emulator with the given serial number passes the filter
*
* If more than one device matches the filter, this function fails unless multi-install
* mode (-x) is enabled. That flag signals that the user understands that multiple
* devices may be used to install the apk if needed.
*/
@SuppressWarnings("PMD.EmptyCatchBlock")
@SuppressForbidden
public boolean adbCall(AdbCallable adbCallable, boolean quiet) throws InterruptedException {
List<IDevice> devices;
try (SimplePerfEvent.Scope ignored = SimplePerfEvent.scope(buckEventBus, "set_up_adb_call")) {
devices = getDevices(quiet);
if (devices.isEmpty()) {
return false;
}
}
int adbThreadCount = options.getAdbThreadCount();
if (adbThreadCount <= 0) {
adbThreadCount = devices.size();
}
// Start executions on all matching devices.
List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
ListeningExecutorService executorService = listeningDecorator(newMultiThreadExecutor(new CommandThreadFactory(getClass().getSimpleName()), adbThreadCount));
for (final IDevice device : devices) {
futures.add(executorService.submit(adbCallable.forDevice(device)));
}
// Wait for all executions to complete or fail.
List<Boolean> results = null;
try {
results = Futures.allAsList(futures).get();
} catch (ExecutionException ex) {
console.printBuildFailure("Failed: " + adbCallable);
ex.printStackTrace(console.getStdErr());
return false;
} catch (InterruptedException e) {
try {
Futures.allAsList(futures).cancel(true);
} catch (CancellationException ignored) {
// Rethrow original InterruptedException instead.
}
Thread.currentThread().interrupt();
throw e;
} finally {
MostExecutors.shutdownOrThrow(executorService, 10, TimeUnit.MINUTES, new InterruptionFailedException("Failed to shutdown ExecutorService."));
}
int successCount = 0;
for (Boolean result : results) {
if (result) {
successCount++;
}
}
int failureCount = results.size() - successCount;
// Report results.
if (successCount > 0 && !quiet) {
console.printSuccess(String.format("Successfully ran %s on %d device(s)", adbCallable, successCount));
}
if (failureCount > 0) {
console.printBuildFailure(String.format("Failed to %s on %d device(s).", adbCallable, failureCount));
}
return failureCount == 0;
}
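The interrupt handling above is a reusable Guava idiom: cancel the aggregate future, restore the thread's interrupt flag, and rethrow the original InterruptedException. A stripped-down sketch of just that idiom, under the assumption of plain Callable<Boolean> tasks (the pool size and task type are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class CancelOnInterruptSketch {
    static List<Boolean> runAll(List<Callable<Boolean>> tasks)
            throws InterruptedException, ExecutionException {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        try {
            List<ListenableFuture<Boolean>> futures = new ArrayList<>();
            for (Callable<Boolean> task : tasks) {
                futures.add(pool.submit(task));
            }
            ListenableFuture<List<Boolean>> all = Futures.allAsList(futures);
            try {
                return all.get();
            } catch (InterruptedException e) {
                // Best-effort cancellation of the remaining work. cancel()
                // does not normally throw CancellationException; the catch
                // mirrors the defensive style of the code above.
                try {
                    all.cancel(true);
                } catch (CancellationException ignored) {
                }
                Thread.currentThread().interrupt(); // restore the flag
                throw e;
            }
        } finally {
            pool.shutdownNow();
        }
    }
}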
Use of java.util.concurrent.CancellationException in project buck by facebook.
The class TestRunning, method runTests.
@SuppressWarnings("PMD.EmptyCatchBlock")
public static int runTests(final CommandRunnerParams params, Iterable<TestRule> tests, ExecutionContext executionContext, final TestRunningOptions options, ListeningExecutorService service, BuildEngine buildEngine, final StepRunner stepRunner, SourcePathResolver sourcePathResolver, SourcePathRuleFinder ruleFinder) throws IOException, ExecutionException, InterruptedException {
ImmutableSet<JavaLibrary> rulesUnderTestForCoverage;
// If needed, we first run instrumentation on the class files.
if (options.isCodeCoverageEnabled()) {
rulesUnderTestForCoverage = getRulesUnderTest(tests);
if (!rulesUnderTestForCoverage.isEmpty()) {
try {
// We'll use the filesystem of the first rule under test. This will fail if there are any
// tests from a different repo, but it'll help us bootstrap ourselves to being able to
// support multiple repos
// TODO(t8220837): Support tests in multiple repos
JavaLibrary library = rulesUnderTestForCoverage.iterator().next();
stepRunner.runStepForBuildTarget(executionContext, new MakeCleanDirectoryStep(library.getProjectFilesystem(), JacocoConstants.getJacocoOutputDir(library.getProjectFilesystem())), Optional.empty());
} catch (StepFailedException e) {
params.getBuckEventBus().post(ConsoleEvent.severe(Throwables.getRootCause(e).getLocalizedMessage()));
return 1;
}
}
} else {
rulesUnderTestForCoverage = ImmutableSet.of();
}
final ImmutableSet<String> testTargets = FluentIterable.from(tests).transform(BuildRule::getBuildTarget).transform(Object::toString).toSet();
final int totalNumberOfTests = Iterables.size(tests);
params.getBuckEventBus().post(TestRunEvent.started(options.isRunAllTests(), options.getTestSelectorList(), options.shouldExplainTestSelectorList(), testTargets));
// Start running all of the tests. The result of each java_test() rule is represented as a
// ListenableFuture.
List<ListenableFuture<TestResults>> results = Lists.newArrayList();
TestRuleKeyFileHelper testRuleKeyFileHelper = new TestRuleKeyFileHelper(buildEngine);
final AtomicInteger lastReportedTestSequenceNumber = new AtomicInteger();
final List<TestRun> separateTestRuns = Lists.newArrayList();
List<TestRun> parallelTestRuns = Lists.newArrayList();
for (final TestRule test : tests) {
// Determine whether the test needs to be executed.
final Callable<TestResults> resultsInterpreter = getCachingCallable(test.interpretTestResults(executionContext, /*isUsingTestSelectors*/
!options.getTestSelectorList().isEmpty()));
boolean isTestRunRequired = isTestRunRequiredForTest(test, buildEngine, executionContext, testRuleKeyFileHelper, options.getTestResultCacheMode(), resultsInterpreter, !options.getTestSelectorList().isEmpty(), !options.getEnvironmentOverrides().isEmpty());
final Map<String, UUID> testUUIDMap = new HashMap<>();
final AtomicReference<TestStatusMessageEvent.Started> currentTestStatusMessageEvent = new AtomicReference<>();
TestRule.TestReportingCallback testReportingCallback = new TestRule.TestReportingCallback() {
@Override
public void testsDidBegin() {
LOG.debug("Tests for rule %s began", test.getBuildTarget());
}
@Override
public void statusDidBegin(TestStatusMessage didBeginMessage) {
LOG.debug("Test status did begin: %s", didBeginMessage);
TestStatusMessageEvent.Started startedEvent = TestStatusMessageEvent.started(didBeginMessage);
TestStatusMessageEvent.Started previousEvent = currentTestStatusMessageEvent.getAndSet(startedEvent);
Preconditions.checkState(previousEvent == null, "Received begin status before end status (%s)", previousEvent);
params.getBuckEventBus().post(startedEvent);
String message = didBeginMessage.getMessage();
if (message.toLowerCase().contains("debugger")) {
executionContext.getStdErr().println(executionContext.getAnsi().asWarningText(message));
}
}
@Override
public void statusDidEnd(TestStatusMessage didEndMessage) {
LOG.debug("Test status did end: %s", didEndMessage);
TestStatusMessageEvent.Started previousEvent = currentTestStatusMessageEvent.getAndSet(null);
Preconditions.checkState(previousEvent != null, "Received end status before begin status (%s)", previousEvent);
params.getBuckEventBus().post(TestStatusMessageEvent.finished(previousEvent, didEndMessage));
}
@Override
public void testDidBegin(String testCaseName, String testName) {
LOG.debug("Test rule %s test case %s test name %s began", test.getBuildTarget(), testCaseName, testName);
UUID testUUID = UUID.randomUUID();
// UUID is immutable and thread-safe as of Java 7, so it's
// safe to stash in a map and use later:
//
// http://bugs.java.com/view_bug.do?bug_id=6611830
testUUIDMap.put(testCaseName + ":" + testName, testUUID);
params.getBuckEventBus().post(TestSummaryEvent.started(testUUID, testCaseName, testName));
}
@Override
public void testDidEnd(TestResultSummary testResultSummary) {
LOG.debug("Test rule %s test did end: %s", test.getBuildTarget(), testResultSummary);
UUID testUUID = testUUIDMap.get(testResultSummary.getTestCaseName() + ":" + testResultSummary.getTestName());
Preconditions.checkNotNull(testUUID);
params.getBuckEventBus().post(TestSummaryEvent.finished(testUUID, testResultSummary));
}
@Override
public void testsDidEnd(List<TestCaseSummary> testCaseSummaries) {
LOG.debug("Test rule %s tests did end: %s", test.getBuildTarget(), testCaseSummaries);
}
};
List<Step> steps;
if (isTestRunRequired) {
params.getBuckEventBus().post(IndividualTestEvent.started(testTargets));
ImmutableList.Builder<Step> stepsBuilder = ImmutableList.builder();
Preconditions.checkState(buildEngine.isRuleBuilt(test.getBuildTarget()));
List<Step> testSteps = test.runTests(executionContext, options, sourcePathResolver, testReportingCallback);
if (!testSteps.isEmpty()) {
stepsBuilder.addAll(testSteps);
stepsBuilder.add(testRuleKeyFileHelper.createRuleKeyInDirStep(test));
}
steps = stepsBuilder.build();
} else {
steps = ImmutableList.of();
}
TestRun testRun = TestRun.of(test, steps, getStatusTransformingCallable(isTestRunRequired, resultsInterpreter), testReportingCallback);
// Always run the commands, even if the list of commands is empty. There may be zero
// commands because the rule is cached, but its results must still be processed.
if (test.runTestSeparately()) {
LOG.debug("Running test %s in serial", test);
separateTestRuns.add(testRun);
} else {
LOG.debug("Running test %s in parallel", test);
parallelTestRuns.add(testRun);
}
}
for (TestRun testRun : parallelTestRuns) {
ListenableFuture<TestResults> testResults = runStepsAndYieldResult(stepRunner, executionContext, testRun.getSteps(), testRun.getTestResultsCallable(), testRun.getTest().getBuildTarget(), params.getBuckEventBus(), service);
results.add(transformTestResults(params, testResults, testRun.getTest(), testRun.getTestReportingCallback(), testTargets, lastReportedTestSequenceNumber, totalNumberOfTests));
}
ListenableFuture<List<TestResults>> parallelTestStepsFuture = Futures.allAsList(results);
final List<TestResults> completedResults = Lists.newArrayList();
final ListeningExecutorService directExecutorService = MoreExecutors.newDirectExecutorService();
ListenableFuture<Void> uberFuture = MoreFutures.addListenableCallback(parallelTestStepsFuture, new FutureCallback<List<TestResults>>() {
@Override
public void onSuccess(List<TestResults> parallelTestResults) {
LOG.debug("Parallel tests completed, running separate tests...");
completedResults.addAll(parallelTestResults);
List<ListenableFuture<TestResults>> separateResultsList = Lists.newArrayList();
for (TestRun testRun : separateTestRuns) {
separateResultsList.add(transformTestResults(params, runStepsAndYieldResult(stepRunner, executionContext, testRun.getSteps(), testRun.getTestResultsCallable(), testRun.getTest().getBuildTarget(), params.getBuckEventBus(), directExecutorService), testRun.getTest(), testRun.getTestReportingCallback(), testTargets, lastReportedTestSequenceNumber, totalNumberOfTests));
}
ListenableFuture<List<TestResults>> serialResults = Futures.allAsList(separateResultsList);
try {
completedResults.addAll(serialResults.get());
} catch (ExecutionException e) {
LOG.error(e, "Error fetching serial test results");
throw new HumanReadableException(e, "Error fetching serial test results");
} catch (InterruptedException e) {
LOG.error(e, "Interrupted fetching serial test results");
try {
serialResults.cancel(true);
} catch (CancellationException ignored) {
// Rethrow original InterruptedException instead.
}
Thread.currentThread().interrupt();
throw new HumanReadableException(e, "Test cancelled");
}
LOG.debug("Done running serial tests.");
}
@Override
public void onFailure(Throwable e) {
LOG.error(e, "Parallel tests failed, not running serial tests");
throw new HumanReadableException(e, "Parallel tests failed");
}
}, directExecutorService);
try {
// Block until all the tests have finished running.
uberFuture.get();
} catch (ExecutionException e) {
e.printStackTrace(params.getConsole().getStdErr());
return 1;
} catch (InterruptedException e) {
try {
uberFuture.cancel(true);
} catch (CancellationException ignored) {
// Rethrow original InterruptedException instead.
}
Thread.currentThread().interrupt();
throw e;
}
params.getBuckEventBus().post(TestRunEvent.finished(testTargets, completedResults));
// Write out the results as XML, if requested.
Optional<String> path = options.getPathToXmlTestOutput();
if (path.isPresent()) {
try (Writer writer = Files.newWriter(new File(path.get()), Charsets.UTF_8)) {
writeXmlOutput(completedResults, writer);
}
}
// Generate the code coverage report.
if (options.isCodeCoverageEnabled() && !rulesUnderTestForCoverage.isEmpty()) {
try {
JavaBuckConfig javaBuckConfig = params.getBuckConfig().getView(JavaBuckConfig.class);
DefaultJavaPackageFinder defaultJavaPackageFinder = javaBuckConfig.createDefaultJavaPackageFinder();
stepRunner.runStepForBuildTarget(executionContext, getReportCommand(rulesUnderTestForCoverage, defaultJavaPackageFinder, javaBuckConfig.getDefaultJavaOptions().getJavaRuntimeLauncher(), params.getCell().getFilesystem(), sourcePathResolver, ruleFinder, JacocoConstants.getJacocoOutputDir(params.getCell().getFilesystem()), options.getCoverageReportFormat(), options.getCoverageReportTitle(), javaBuckConfig.getDefaultJavacOptions().getSpoolMode() == JavacOptions.SpoolMode.INTERMEDIATE_TO_DISK, options.getCoverageIncludes(), options.getCoverageExcludes()), Optional.empty());
} catch (StepFailedException e) {
params.getBuckEventBus().post(ConsoleEvent.severe(Throwables.getRootCause(e).getLocalizedMessage()));
return 1;
}
}
boolean failures = Iterables.any(completedResults, results1 -> {
LOG.debug("Checking result %s for failure", results1);
return !results1.isSuccess();
});
return failures ? TEST_FAILURES_EXIT_CODE : 0;
}
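One subtlety in the InterruptedException branch above: the cancelling thread rethrows the original InterruptedException, while any other thread blocked in get() on the same future observes a CancellationException instead. A minimal sketch of that distinction (names and the sleep-based timing are illustrative only):

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CancelObserverSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newCachedThreadPool();
        Future<String> work = pool.submit(() -> {
            Thread.sleep(60_000); // stands in for long-running test steps
            return "done";
        });
        // a second thread blocked on the same future...
        Future<?> waiter = pool.submit(() -> {
            try {
                work.get();
            } catch (CancellationException e) {
                // ...observes the cancellation as CancellationException
                System.out.println("waiter saw the cancellation");
            } catch (InterruptedException | ExecutionException e) {
                Thread.currentThread().interrupt();
            }
        });
        Thread.sleep(100); // give the waiter time to block (illustrative only)
        work.cancel(true); // interrupts the sleeping task
        waiter.get(); // returns once the waiter has observed the cancellation
        pool.shutdownNow();
    }
}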
Use of java.util.concurrent.CancellationException in project druid by druid-io.
The class KafkaLookupExtractorFactory, method start.
@Override
public boolean start() {
synchronized (started) {
if (started.get()) {
LOG.warn("Already started, not starting again");
return started.get();
}
if (executorService.isShutdown()) {
LOG.warn("Already shut down, not starting again");
return false;
}
final Properties kafkaProperties = new Properties();
kafkaProperties.putAll(getKafkaProperties());
if (kafkaProperties.containsKey("group.id")) {
throw new IAE("Cannot set kafka property [group.id]. Property is randomly generated for you. Found [%s]", kafkaProperties.getProperty("group.id"));
}
if (kafkaProperties.containsKey("auto.offset.reset")) {
throw new IAE("Cannot set kafka property [auto.offset.reset]. Property will be forced to [smallest]. Found [%s]", kafkaProperties.getProperty("auto.offset.reset"));
}
Preconditions.checkNotNull(kafkaProperties.getProperty("zookeeper.connect"), "zookeeper.connect required property");
kafkaProperties.setProperty("group.id", factoryId);
final String topic = getKafkaTopic();
LOG.debug("About to listen to topic [%s] with group.id [%s]", topic, factoryId);
cacheHandler = cacheManager.createCache();
final Map<String, String> map = cacheHandler.getCache();
mapRef.set(map);
// A random group.id plus an offset reset of "smallest" gives publish-subscribe semantics: each factory replays the whole topic into its cache.
kafkaProperties.setProperty("auto.offset.reset", "smallest");
final CountDownLatch startingReads = new CountDownLatch(1);
final ListenableFuture<?> future = executorService.submit(new Runnable() {
@Override
public void run() {
while (!executorService.isShutdown()) {
consumerConnector = buildConnector(kafkaProperties);
try {
if (executorService.isShutdown()) {
break;
}
final List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreamsByFilter(new Whitelist(Pattern.quote(topic)), 1, DEFAULT_STRING_DECODER, DEFAULT_STRING_DECODER);
if (streams == null || streams.isEmpty()) {
throw new IAE("Topic [%s] had no streams", topic);
}
if (streams.size() > 1) {
throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
}
final KafkaStream<String, String> kafkaStream = streams.get(0);
startingReads.countDown();
for (final MessageAndMetadata<String, String> messageAndMetadata : kafkaStream) {
final String key = messageAndMetadata.key();
final String message = messageAndMetadata.message();
if (key == null || message == null) {
LOG.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
continue;
}
doubleEventCount.incrementAndGet();
map.put(key, message);
doubleEventCount.incrementAndGet();
LOG.trace("Placed key[%s] val[%s]", key, message);
}
} catch (Exception e) {
LOG.error(e, "Error reading stream for topic [%s]", topic);
} finally {
consumerConnector.shutdown();
}
}
}
});
Futures.addCallback(future, new FutureCallback<Object>() {
@Override
public void onSuccess(Object result) {
LOG.debug("Success listening to [%s]", topic);
}
@Override
public void onFailure(Throwable t) {
if (t instanceof CancellationException) {
LOG.debug("Topic [%s] cancelled", topic);
} else {
LOG.error(t, "Error in listening to [%s]", topic);
}
}
}, MoreExecutors.sameThreadExecutor());
this.future = future;
final Stopwatch stopwatch = Stopwatch.createStarted();
try {
while (!startingReads.await(100, TimeUnit.MILLISECONDS) && connectTimeout > 0L) {
// Don't return until we have actually connected
if (future.isDone()) {
future.get();
} else {
if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeout) {
throw new TimeoutException("Failed to connect to kafka in sufficient time");
}
}
}
} catch (InterruptedException | ExecutionException | TimeoutException e) {
executorService.shutdown();
if (!future.isDone() && !future.cancel(false)) {
LOG.warn("Could not cancel kafka listening thread");
}
LOG.error(e, "Failed to start kafka extraction factory");
cacheHandler.close();
return false;
}
started.set(true);
return true;
}
}
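The onFailure branch above distinguishes an orderly cancellation from a real failure, which is what lets a stop()-style shutdown cancel the listener without logging an error. A condensed sketch of the same discrimination (the loop body and executor are illustrative; directExecutor stands in for the deprecated sameThreadExecutor):

import java.util.concurrent.CancellationException;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class ShutdownAwareCallbackSketch {
    public static void main(String[] args) {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<?> future = pool.submit(() -> {
            // stands in for the long-lived Kafka consumer loop
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });
        Futures.addCallback(future, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                System.out.println("listener finished normally");
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof CancellationException) {
                    System.out.println("listener cancelled (orderly shutdown)");
                } else {
                    System.err.println("listener failed: " + t);
                }
            }
        }, MoreExecutors.directExecutor());
        future.cancel(true); // drives onFailure with a CancellationException
        pool.shutdownNow();
    }
}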