Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache: class RMServerUtils, method validateISO8601AndConvertToLocalTimeEpoch.
/**
 * Parses each configured ISO8601 timeout string into a local-time epoch
 * timestamp, rejecting values that are malformed or already in the past.
 *
 * @param timeoutsInISO8601 timeout strings in ISO8601 form, keyed by timeout
 *                          type; may be {@code null}, which yields an empty map
 * @return the parsed timeouts as epoch milliseconds (local time), keyed by type
 * @throws YarnException if a value is not valid ISO8601, or if it is earlier
 *                       than the current time
 */
public static Map<ApplicationTimeoutType, Long> validateISO8601AndConvertToLocalTimeEpoch(Map<ApplicationTimeoutType, String> timeoutsInISO8601) throws YarnException {
  final long now = clock.getTime();
  Map<ApplicationTimeoutType, Long> converted = new HashMap<>();
  if (timeoutsInISO8601 == null) {
    // Nothing configured; an empty map means "no timeouts".
    return converted;
  }
  for (Map.Entry<ApplicationTimeoutType, String> entry : timeoutsInISO8601.entrySet()) {
    final long expireTime;
    try {
      expireTime = Times.parseISO8601ToLocalTimeInMillis(entry.getValue());
    } catch (ParseException ex) {
      throw new YarnException("Expire time is not in ISO8601 format. ISO8601 supported " + "format is yyyy-MM-dd'T'HH:mm:ss.SSSZ. Configured " + "timeout value is " + entry.getValue(), ex);
    }
    // A timeout in the past would expire the application immediately; reject it.
    if (expireTime < now) {
      throw new YarnException("Expire time is less than current time, current-time=" + Times.formatISO8601(now) + " expire-time=" + Times.formatISO8601(expireTime));
    }
    converted.put(entry.getKey(), expireTime);
  }
  return converted;
}
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache: class NodesListManager, method disableHostsFileReader.
/**
 * Fallback invoked when the configured include/exclude hosts files could not
 * be read: re-initializes the hosts reader from the default paths so the RM
 * can continue running. Any failure here is considered fatal and is rethrown
 * as an unchecked {@link YarnRuntimeException}.
 *
 * @param ex the original failure that triggered the fallback (logged, not rethrown)
 */
private void disableHostsFileReader(Exception ex) {
  LOG.warn("Failed to init hostsReader, disabling", ex);
  try {
    // NOTE(review): these lookups pass the DEFAULT_* *value* constants to
    // conf.get(...) as config *keys*, not as fallback values for the
    // RM_NODES_*_FILE_PATH keys. That likely always resolves to null unless
    // a key with that name happens to exist — confirm the intent; the usual
    // pattern is conf.get(KEY, DEFAULT).
    this.includesFile = conf.get(YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH);
    this.excludesFile = conf.get(YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
    this.hostsReader = createHostsFileReader(this.includesFile, this.excludesFile);
    setDecomissionedNMs();
  } catch (IOException | YarnException e) {
    // Should *never* happen with the default paths; clear the reader and
    // surface the failure as unchecked so startup aborts loudly.
    this.hostsReader = null;
    throw new YarnRuntimeException(e);
  }
}
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache: class NodesListManager, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.conf = conf;
// Choose the hostname->IP resolver: a non-positive cache timeout disables
// caching entirely; otherwise cached entries expire after the configured
// number of seconds and the resolver participates in the service lifecycle.
int nodeIpCacheTimeout = conf.getInt(YarnConfiguration.RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS, YarnConfiguration.DEFAULT_RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS);
if (nodeIpCacheTimeout <= 0) {
resolver = new DirectResolver();
} else {
resolver = new CachedResolver(SystemClock.getInstance(), nodeIpCacheTimeout);
addIfService(resolver);
}
// Read the hosts/exclude files to restrict access to the RM
try {
this.includesFile = conf.get(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH);
this.excludesFile = conf.get(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
this.hostsReader = createHostsFileReader(this.includesFile, this.excludesFile);
setDecomissionedNMs();
printConfiguredHosts();
} catch (YarnException ex) {
// Fall back to the default hosts files rather than failing service init.
disableHostsFileReader(ex);
} catch (IOException ioe) {
disableHostsFileReader(ioe);
}
// Periodically purge untracked nodes (no longer in the include/exclude
// lists) from the inactive-node map once they have been untracked longer
// than the removal timeout. The check interval is half the timeout,
// capped at 10 minutes (600000 ms).
final int nodeRemovalTimeout = conf.getInt(YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC, YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
nodeRemovalCheckInterval = (Math.min(nodeRemovalTimeout / 2, 600000));
removalTimer = new Timer("Node Removal Timer");
removalTimer.schedule(new TimerTask() {
@Override
public void run() {
long now = Time.monotonicNow();
for (Map.Entry<NodeId, RMNode> entry : rmContext.getInactiveRMNodes().entrySet()) {
NodeId nodeId = entry.getKey();
RMNode rmNode = entry.getValue();
if (isUntrackedNode(rmNode.getHostName())) {
if (rmNode.getUntrackedTimeStamp() == 0) {
// First time we see this node as untracked: start its clock.
rmNode.setUntrackedTimeStamp(now);
} else if (now - rmNode.getUntrackedTimeStamp() > nodeRemovalTimeout) {
// Untracked long enough — remove it and adjust metrics. The
// remove() result is checked because another thread may have
// already removed the node from the map.
RMNode result = rmContext.getInactiveRMNodes().remove(nodeId);
if (result != null) {
decrInactiveNMMetrics(rmNode);
LOG.info("Removed " + result.getState().toString() + " node " + result.getHostName() + " from inactive nodes list");
}
}
} else {
// Node is tracked again; reset its untracked clock.
rmNode.setUntrackedTimeStamp(0);
}
}
}
}, nodeRemovalCheckInterval, nodeRemovalCheckInterval);
super.serviceInit(conf);
}
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache: class ClientRMService, method getApplicationAttemptReport.
/**
 * Looks up the report for a single application attempt, enforcing
 * VIEW_APP access for the calling user.
 *
 * @param request carries the attempt id to look up
 * @return the attempt report wrapped in a response record
 * @throws YarnException if the caller lacks view access, the application is
 *                       unknown, or the attempt is unknown
 * @throws IOException   on UGI resolution failure (wrapped as a remote exception)
 */
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(GetApplicationAttemptReportRequest request) throws YarnException, IOException {
  ApplicationAttemptId appAttemptId = request.getApplicationAttemptId();
  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    LOG.info("Error getting UGI ", ie);
    throw RPCUtil.getRemoteException(ie);
  }
  // Guard: the application must be known to this RM.
  RMApp application = this.rmContext.getRMApps().get(appAttemptId.getApplicationId());
  if (application == null) {
    // ApplicationNotFoundException and let client to handle.
    throw new ApplicationNotFoundException("Application with id '" + request.getApplicationAttemptId().getApplicationId() + "' doesn't exist in RM. Please check that the job " + "submission was successful.");
  }
  // Guard: the caller must hold VIEW_APP on this application.
  boolean allowAccess = checkAccess(callerUGI, application.getUser(), ApplicationAccessType.VIEW_APP, application);
  if (!allowAccess) {
    throw new YarnException("User " + callerUGI.getShortUserName() + " does not have privilege to see this attempt " + appAttemptId);
  }
  // Guard: the specific attempt must exist within the application.
  RMAppAttempt appAttempt = application.getAppAttempts().get(appAttemptId);
  if (appAttempt == null) {
    throw new ApplicationAttemptNotFoundException("ApplicationAttempt with id '" + appAttemptId + "' doesn't exist in RM.");
  }
  ApplicationAttemptReport attemptReport = appAttempt.createApplicationAttemptReport();
  return GetApplicationAttemptReportResponse.newInstance(attemptReport);
}
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache: class AdminService, method refreshClusterMaxPriority.
/**
 * Admin RPC: re-reads the cluster maximum application priority, after
 * checking ACLs and that this RM is active. Success and failure are both
 * recorded via {@code RMAuditLogger}.
 *
 * @param request the (empty) refresh request
 * @return a fresh, empty response record on success
 * @throws YarnException if the caller is not authorized, the RM is not
 *                       active, or the underlying refresh fails
 * @throws IOException   on ACL-check I/O failure
 */
@Override
public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(RefreshClusterMaxPriorityRequest request) throws YarnException, IOException {
  final String op = "refreshClusterMaxPriority";
  final String description = "refresh cluster max priority";
  UserGroupInformation caller = checkAcls(op);
  checkRMStatus(caller.getShortUserName(), op, description);
  try {
    refreshClusterMaxPriority();
  } catch (YarnException e) {
    // Audit-log the failure and rethrow with context attached.
    throw logAndWrapException(e, caller.getShortUserName(), op, description);
  }
  RMAuditLogger.logSuccess(caller.getShortUserName(), op, "AdminService");
  return recordFactory.newRecordInstance(RefreshClusterMaxPriorityResponse.class);
}
Aggregations