Use of com.continuuity.weave.internal.yarn.YarnContainerStatus in the continuuity/weave project.
The following is the doStop method of the ApplicationMasterService class.
/**
 * Shuts down the application master. The sequence is: run the event handler's
 * destroy hook, stop the instance-change executor, stop all running containers
 * (polling the RM briefly so completions are observed), stop the tracker
 * service, clean up the application directory, flush logs, and finally stop
 * the embedded Kafka server. Ordering matters: Kafka is stopped last so that
 * log messages emitted during shutdown can still be fetched by clients.
 *
 * @throws Exception if cleanup of the application directory or the final
 *         sleep is interrupted; earlier failures are caught and logged.
 */
private void doStop() throws Exception {
  // This is just to clear the interrupt flag
  Thread.interrupted();
  LOG.info("Stop application master with spec: {}", WeaveSpecificationAdapter.create().toJson(weaveSpec));
  try {
    // Call the event handler's destroy hook. Any error is only logged and
    // must not abort the rest of the stop sequence.
    eventHandler.destroy();
  } catch (Throwable t) {
    LOG.warn("Exception when calling {}.destroy()", weaveSpec.getEventHandler().getClassName(), t);
  }
  // Stop reacting to instance-count changes; pending tasks are discarded.
  instanceChangeExecutor.shutdownNow();
  // For checking if all containers are stopped. Completed container ids
  // reported by the RM are removed from this set by the handler below.
  final Set<String> ids = Sets.newHashSet(runningContainers.getContainerIds());
  YarnAMClient.AllocateHandler handler = new YarnAMClient.AllocateHandler() {
    @Override
    public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
      // no-op: we are shutting down, newly acquired containers are ignored.
    }
    @Override
    public void completed(List<YarnContainerStatus> completed) {
      for (YarnContainerStatus status : completed) {
        ids.remove(status.getContainerId());
      }
    }
  };
  runningContainers.stopAll();
  // Poll for 5 seconds (5 x 1s allocate/sleep cycles) to wait for containers
  // to stop; gives up after that even if some ids remain.
  int count = 0;
  while (!ids.isEmpty() && count++ < 5) {
    // Progress of 0.0f: we only want the completion callbacks, no new requests.
    amClient.allocate(0.0f, handler);
    TimeUnit.SECONDS.sleep(1);
  }
  LOG.info("Stopping application master tracker server");
  try {
    trackerService.stopAndWait();
    LOG.info("Stopped application master tracker server");
  } catch (Exception e) {
    LOG.error("Failed to stop tracker service.", e);
  } finally {
    try {
      // App location cleanup
      cleanupDir(URI.create(System.getenv(EnvKeys.WEAVE_APP_DIR)));
      Loggings.forceFlush();
      // Sleep a short while to let kafka clients to have chance to fetch the log
      TimeUnit.SECONDS.sleep(1);
    } finally {
      // Kafka must be the last thing to stop so shutdown logs remain fetchable.
      kafkaServer.stopAndWait();
      LOG.info("Kafka server stopped");
    }
  }
}
Use of com.continuuity.weave.internal.yarn.YarnContainerStatus in the continuuity/weave project.
The following is the handleCompleted method of the ApplicationMasterService class.
/**
 * Handles completion notifications for containers: records each completion
 * with the running-containers tracker, re-requests containers for runnables
 * that need to be restarted, and refreshes their expected-count timestamps.
 */
private void handleCompleted(List<YarnContainerStatus> completedContainersStatuses) {
  // Counts how many replacement containers each runnable needs.
  Multiset<String> restartCounts = HashMultiset.create();
  for (YarnContainerStatus containerStatus : completedContainersStatuses) {
    LOG.info("Container {} completed with {}:{}.",
             containerStatus.getContainerId(), containerStatus.getState(), containerStatus.getDiagnostics());
    runningContainers.handleCompleted(containerStatus, restartCounts);
  }
  for (Multiset.Entry<String> restart : restartCounts.entrySet()) {
    String runnableName = restart.getElement();
    int instances = restart.getCount();
    LOG.info("Re-request container for {} with {} instances.", runnableName, instances);
    int remaining = instances;
    while (remaining-- > 0) {
      runnableContainerRequests.add(createRunnableContainerRequest(runnableName));
    }
  }
  // For all runnables that need to re-request containers, update the expected
  // count timestamp so that the EventHandler is triggered with the right
  // expiration timestamp.
  expectedContainers.updateRequestTime(restartCounts.elementSet());
}
Aggregations