use of org.apache.commons.lang.time.StopWatch in project weave by continuuity.
The class YarnWeaveController, method doStartUp.
@Override
protected void doStartUp() {
    super.doStartUp();

    // Submit the application to YARN, then poll its state until it has started
    // running (or the configured startup timeout elapses).
    try {
        processController = startUp.call();

        YarnApplicationReport report = processController.getReport();
        LOG.debug("Application {} submit", report.getApplicationId());

        YarnApplicationState state = report.getYarnApplicationState();

        // Split-time stopwatch bounds the polling loop to APPLICATION_MAX_START_SECONDS.
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        stopWatch.split();
        long maxTime = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_START_SECONDS, TimeUnit.SECONDS);

        LOG.info("Checking yarn application status");
        while (!hasRun(state) && stopWatch.getSplitTime() < maxTime) {
            report = processController.getReport();
            state = report.getYarnApplicationState();
            LOG.debug("Yarn application status: {}", state);
            TimeUnit.SECONDS.sleep(1);
            stopWatch.split();
        }
        LOG.info("Yarn application is in state {}", state);

        if (state != YarnApplicationState.RUNNING) {
            // Fix: the original message had no '{}' placeholder, so the timeout
            // argument passed to the logger was silently dropped.
            LOG.info("Yarn application is not in running state after {} seconds. Shutting down controller.",
                     Constants.APPLICATION_MAX_START_SECONDS);
            forceShutDown();
        } else {
            try {
                URL resourceUrl = URI.create(String.format("http://%s:%d", report.getHost(), report.getRpcPort()))
                                     .resolve(TrackerService.PATH).toURL();
                resourcesClient = new ResourceReportClient(resourceUrl);
            } catch (IOException e) {
                // Resource reporting is best-effort; the controller works without it.
                resourcesClient = null;
            }
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
use of org.apache.commons.lang.time.StopWatch in project weave by continuuity.
The class YarnWeaveController, method doShutDown.
@Override
protected void doShutDown() {
    if (processController == null) {
        LOG.warn("No process controller for application that is not submitted.");
        return;
    }

    // Phase 1: give the application a bounded chance to act on the stop message;
    // if that fails or times out, fall back to killing it through YARN.
    try {
        Uninterruptibles.getUninterruptibly(getStopMessageFuture(),
                                            Constants.APPLICATION_MAX_STOP_SECONDS, TimeUnit.SECONDS);
    } catch (Exception e) {
        LOG.error("Failed to wait for stop message being processed.", e);
        kill();
    }

    // Phase 2: poll YARN until a final status is reported, or the stop timeout
    // elapses, in which case the application is killed.
    try {
        StopWatch timer = new StopWatch();
        timer.start();
        timer.split();
        long maxMillis = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_STOP_SECONDS, TimeUnit.SECONDS);

        YarnApplicationReport report = processController.getReport();
        FinalApplicationStatus finalStatus = report.getFinalApplicationStatus();
        while (finalStatus == FinalApplicationStatus.UNDEFINED && timer.getSplitTime() < maxMillis) {
            LOG.debug("Yarn application final status for {} {}", report.getApplicationId(), finalStatus);
            TimeUnit.SECONDS.sleep(1);
            timer.split();
            finalStatus = processController.getReport().getFinalApplicationStatus();
        }
        LOG.debug("Yarn application final status is {}", finalStatus);

        // Still no final status after the deadline: force-kill the application.
        if (finalStatus == FinalApplicationStatus.UNDEFINED) {
            kill();
        }
    } catch (Exception e) {
        LOG.warn("Exception while waiting for application report: {}", e.getMessage(), e);
        kill();
    }

    super.doShutDown();
}
use of org.apache.commons.lang.time.StopWatch in project OpenAM by OpenRock.
The class CTSReaper, method run.
/**
 * Queries the directory for the Token IDs of all Tokens that have expired and
 * schedules each page of IDs for deletion. Blocks until every scheduled delete
 * batch has completed.
 */
public void run() {
    debug("Reaper starting");

    // Stop watches used purely for diagnostic timing of the two phases.
    StopWatch query = new StopWatch();
    StopWatch waiting = new StopWatch();

    // One latch per page of results; each is released once its batch is deleted.
    List<CountDownLatch> latches = new ArrayList<CountDownLatch>();

    ReaperQuery reaperQuery = queryFactory.getQuery();
    try {
        long total = 0;
        query.start();

        Collection<String> ids = reaperQuery.nextPage();
        while (ids != null) {
            // Exit promptly if this thread has been asked to stop, preserving
            // the interrupt status for the caller.
            if (Thread.interrupted()) {
                Thread.currentThread().interrupt();
                debug("Interrupted, returning");
                return;
            }
            total += ids.size();
            debug("Queried {0} tokens", Long.toString(total));
            latches.add(tokenDeletion.deleteBatch(ids));
            ids = reaperQuery.nextPage();
        }
        query.stop();

        waiting.start();
        debug("Expired Token Query Time: {0}ms", Long.toString(query.getTime()));

        // Wait stage: block until every delete batch has finished.
        for (CountDownLatch latch : latches) {
            try {
                latch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        waiting.stop();

        monitoringStore.addReaperRun(query.getStartTime(), query.getTime() + waiting.getTime(), total);
        debug("Worker threads Time: {0}ms", Long.toString(waiting.getTime()));
    } catch (CoreTokenException e) {
        debug.error("CTS Reaper failed", e);
    }
    debug("Reaper complete");
}
use of org.apache.commons.lang.time.StopWatch in project sling by apache.
The class HealthCheckExecutorImpl, method waitForFuturesRespectingTimeout.
/**
 * Blocks until all of the given futures have completed, or until the effective
 * timeout (global, or overridden via {@code options}) is reached.
 *
 * @param futuresForResultOfThisCall futures to wait for; may be empty
 * @param options optional execution options; a positive override replaces the global timeout
 */
private void waitForFuturesRespectingTimeout(final List<HealthCheckFuture> futuresForResultOfThisCall, HealthCheckExecutionOptions options) {
    final StopWatch callExecutionTimeStopWatch = new StopWatch();
    callExecutionTimeStopWatch.start();

    long effectiveTimeout = this.timeoutInMs;
    if (options != null && options.getOverrideGlobalTimeout() > 0) {
        effectiveTimeout = options.getOverrideGlobalTimeout();
    }

    if (futuresForResultOfThisCall.isEmpty()) {
        // nothing to wait for (usually because of cached results)
        return;
    }

    boolean allFuturesDone;
    do {
        try {
            synchronized (stillRunningFutures) {
                // wait for notifications of callbacks of HealthCheckFutures
                stillRunningFutures.wait(50);
            }
        } catch (final InterruptedException ie) {
            logger.warn("Unexpected InterruptedException while waiting for healthCheckContributors", ie);
            // Fix: restore the interrupt flag instead of swallowing it, so
            // callers can still observe that this thread was interrupted.
            Thread.currentThread().interrupt();
        }
        allFuturesDone = true;
        for (final HealthCheckFuture healthCheckFuture : futuresForResultOfThisCall) {
            allFuturesDone &= healthCheckFuture.isDone();
        }
    } while (!allFuturesDone && callExecutionTimeStopWatch.getTime() < effectiveTimeout);
}
use of org.apache.commons.lang.time.StopWatch in project nanopub-server by tkuhn.
The class CollectNanopubs, method processPage.
/**
 * Fetches one page of nanopub URIs from the peer and loads the ones not yet in
 * the local database. If more than 5 nanopubs on a non-final page are new, the
 * whole page is downloaded as a single (preferably gzip-compressed) package;
 * otherwise each new nanopub is fetched individually. Finally records the peer
 * position reached in the database.
 *
 * @param page            1-based page number on the peer
 * @param isLastPage      true if this is the peer's last page (package download is then skipped)
 * @param ignoreBeforePos absolute nanopub positions below this value are skipped
 * @throws Exception on HTTP failure, parse failure, or download timeout
 */
private void processPage(int page, boolean isLastPage, long ignoreBeforePos) throws Exception {
parent.stillAlive();
logger.info("Process page " + page + " from " + peerInfo.getPublicUrl());
loaded = 0;
// nextNp tracks the absolute position of the next nanopub within the peer's list.
nextNp = (page - 1) * peerPageSize;
List<String> toLoad = new ArrayList<>();
boolean downloadAsPackage = false;
// First pass: decide which artifact codes are new and whether to switch to package mode.
for (String nanopubUri : NanopubServerUtils.loadNanopubUriList(peerInfo, page)) {
parent.stillAlive();
if (nextNp >= ignoreBeforePos) {
String ac = TrustyUriUtils.getArtifactCode(nanopubUri);
if (ac != null && ourPattern.matchesUri(nanopubUri) && !db.hasNanopub(ac)) {
toLoad.add(ac);
if (!isLastPage && toLoad.size() > 5) {
// Download entire package if more than 5 nanopubs are new;
// rewind nextNp to the start of the page for the package pass.
downloadAsPackage = true;
nextNp = (page - 1) * peerPageSize;
break;
}
}
}
nextNp++;
}
// 5-second connect timeout so a dead peer does not stall the collector.
RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(5 * 1000).build();
HttpClient c = HttpClientBuilder.create().setDefaultRequestConfig(requestConfig).build();
// 'watch' is a field: it also bounds the package download time (see handler below)
// and is read by recordTime().
watch = new StopWatch();
watch.start();
if (downloadAsPackage) {
logger.info("Download page " + page + " as compressed package...");
HttpGet get = new HttpGet(peerInfo.getPublicUrl() + "package.gz?page=" + page);
get.setHeader("Accept", "application/x-gzip");
HttpResponse resp = c.execute(get);
InputStream in = null;
try {
if (wasSuccessful(resp)) {
in = new GZIPInputStream(resp.getEntity().getContent());
} else {
logger.info("Failed. Trying uncompressed package...");
// This is for compatibility with older versions; to be removed at some point...
get = new HttpGet(peerInfo.getPublicUrl() + "package?page=" + page);
get.setHeader("Accept", "application/trig");
resp = c.execute(get);
if (!wasSuccessful(resp)) {
logger.error("HTTP request failed: " + resp.getStatusLine().getReasonPhrase());
recordTime();
throw new RuntimeException(resp.getStatusLine().getReasonPhrase());
}
in = resp.getEntity().getContent();
}
// Stream-parse the package; each nanopub is handled as it is read.
MultiNanopubRdfHandler.process(RDFFormat.TRIG, in, new NanopubHandler() {
@Override
public void handleNanopub(Nanopub np) {
nextNp++;
if (watch.getTime() > 5 * 60 * 1000) {
// Downloading the whole package should never take more than 5 minutes.
logger.error("Downloading package took too long; interrupting");
recordTime();
// RuntimeException is the only way to abort the streaming parser from here.
throw new RuntimeException("Downloading package took too long; interrupting");
}
if (!ourPattern.matchesUri(np.getUri().stringValue()))
return;
try {
loadNanopub(np);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
});
} finally {
if (in != null)
in.close();
}
} else {
// Individual mode: fetch each new nanopub by its artifact code.
logger.info("Download " + toLoad.size() + " nanopubs individually...");
for (String ac : toLoad) {
parent.stillAlive();
HttpGet get = new HttpGet(peerInfo.getPublicUrl() + ac);
get.setHeader("Accept", "application/trig");
HttpResponse resp = c.execute(get);
if (!wasSuccessful(resp)) {
logger.error("HTTP request failed: " + resp.getStatusLine().getReasonPhrase());
recordTime();
throw new RuntimeException(resp.getStatusLine().getReasonPhrase());
}
InputStream in = null;
try {
in = resp.getEntity().getContent();
loadNanopub(new NanopubImpl(in, RDFFormat.TRIG));
} finally {
if (in != null)
in.close();
}
}
}
recordTime();
// Persist how far into the peer's list we got, so the next run resumes from here.
logger.info("Update peer state: " + peerInfo.getPublicUrl() + " at position " + nextNp);
db.updatePeerState(peerInfo, nextNp);
}
Aggregations