Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class LogAggregationService, method initAppAggregator.
protected void initAppAggregator(final ApplicationId appId, String user,
    Credentials credentials, Map<ApplicationAccessType, String> appAcls,
    LogAggregationContext logAggregationContext, long recoveredLogInitedTime) {
  // Get user's FileSystem credentials
  final UserGroupInformation userUgi =
      UserGroupInformation.createRemoteUser(user);
  if (credentials != null) {
    userUgi.addCredentials(credentials);
  }
  // New application
  final AppLogAggregator appLogAggregator =
      new AppLogAggregatorImpl(this.dispatcher, this.deletionService,
          getConfig(), appId, userUgi, this.nodeId, dirsHandler,
          getRemoteNodeLogFileForApp(appId, user), appAcls,
          logAggregationContext, this.context,
          getLocalFileContext(getConfig()), this.rollingMonitorInterval,
          recoveredLogInitedTime);
  if (this.appLogAggregators.putIfAbsent(appId, appLogAggregator) != null) {
    throw new YarnRuntimeException("Duplicate initApp for " + appId);
  }
  // wait until check for existing aggregator to create dirs
  YarnRuntimeException appDirException = null;
  try {
    // Create the app dir
    createAppDir(user, appId, userUgi);
  } catch (Exception e) {
    appLogAggregator.disableLogAggregation();
    if (!(e instanceof YarnRuntimeException)) {
      appDirException = new YarnRuntimeException(e);
    } else {
      appDirException = (YarnRuntimeException) e;
    }
    appLogAggregators.remove(appId);
    closeFileSystems(userUgi);
    throw appDirException;
  }
  // TODO Get the user configuration for the list of containers that need log
  // aggregation.
  // Schedule the aggregator.
  Runnable aggregatorWrapper = new Runnable() {
    public void run() {
      try {
        appLogAggregator.run();
      } finally {
        appLogAggregators.remove(appId);
        closeFileSystems(userUgi);
      }
    }
  };
  this.threadPool.execute(aggregatorWrapper);
}
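The putIfAbsent guard above is the key to the duplicate-init check. A minimal self-contained sketch of the same pattern, assuming only hadoop-yarn-api on the classpath; the registry map and register method are hypothetical stand-ins for appLogAggregators and initAppAggregator:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class DuplicateInitGuard {

  // Hypothetical registry standing in for this.appLogAggregators.
  private final ConcurrentMap<ApplicationId, Runnable> aggregators =
      new ConcurrentHashMap<>();

  public void register(ApplicationId appId, Runnable aggregator) {
    // putIfAbsent is atomic, so two concurrent registrations cannot both win;
    // the loser sees the existing entry and fails fast, as initAppAggregator does.
    if (aggregators.putIfAbsent(appId, aggregator) != null) {
      throw new YarnRuntimeException("Duplicate initApp for " + appId);
    }
  }
}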
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class ResourceLocalizationService, method serviceInit.
@Override
public void serviceInit(Configuration conf) throws Exception {
  this.validateConf(conf);
  this.publicRsrc = new LocalResourcesTrackerImpl(null, null, dispatcher,
      true, conf, stateStore, dirsHandler);
  this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  try {
    lfs = getLocalFileContext(conf);
    lfs.setUMask(new FsPermission((short) FsPermission.DEFAULT_UMASK));
    if (!stateStore.canRecover() || stateStore.isNewlyCreated()) {
      cleanUpLocalDirs(lfs, delService);
      cleanupLogDirs(lfs, delService);
      initializeLocalDirs(lfs);
      initializeLogDirs(lfs);
    }
  } catch (Exception e) {
    throw new YarnRuntimeException(
        "Failed to initialize LocalizationService", e);
  }
  diskValidator = DiskValidatorFactory.getInstance(
      conf.get(YarnConfiguration.DISK_VALIDATOR));
  LOG.info("Disk Validator: " + YarnConfiguration.DISK_VALIDATOR
      + " is loaded.");
  cacheTargetSize = conf.getLong(
      YarnConfiguration.NM_LOCALIZER_CACHE_TARGET_SIZE_MB,
      YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB) << 20;
  cacheCleanupPeriod = conf.getLong(
      YarnConfiguration.NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS,
      YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS);
  localizationServerAddress = conf.getSocketAddr(
      YarnConfiguration.NM_BIND_HOST,
      YarnConfiguration.NM_LOCALIZER_ADDRESS,
      YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
      YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT);
  localizerTracker = createLocalizerTracker(conf);
  addService(localizerTracker);
  dispatcher.register(LocalizerEventType.class, localizerTracker);
  localDirsChangeListener = new DirsChangeListener() {
    @Override
    public void onDirsChanged() {
      checkAndInitializeLocalDirs();
    }
  };
  logDirsChangeListener = new DirsChangeListener() {
    @Override
    public void onDirsChanged() {
      initializeLogDirs(lfs);
    }
  };
  super.serviceInit(conf);
}
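The try/catch around directory setup shows the convention of converting any checked init failure into an unchecked YarnRuntimeException. A minimal sketch of that wrap-and-rethrow pattern in a hypothetical AbstractService subclass (ExampleLocalService and setUpLocalDirs are illustrative names, not Hadoop APIs):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class ExampleLocalService extends AbstractService {

  public ExampleLocalService() {
    super("ExampleLocalService");
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    try {
      // Setup that may throw checked exceptions (filesystem access, dir creation).
      setUpLocalDirs(conf);
    } catch (Exception e) {
      // Surface the failure as unchecked so service init is unmistakably fatal.
      throw new YarnRuntimeException("Failed to initialize ExampleLocalService", e);
    }
    super.serviceInit(conf);
  }

  private void setUpLocalDirs(Configuration conf) throws IOException {
    // Placeholder for real directory initialization.
  }
}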
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class RMSecretManagerService, method serviceStart.
@Override
public void serviceStart() throws Exception {
  amRmTokenSecretManager.start();
  containerTokenSecretManager.start();
  nmTokenSecretManager.start();
  try {
    rmDTSecretManager.startThreads();
  } catch (IOException ie) {
    throw new YarnRuntimeException("Failed to start secret manager threads", ie);
  }
  super.serviceStart();
}
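Wrapping the IOException keeps the original failure available as the cause of the unchecked exception. A small self-contained sketch of that behavior, using a simulated IOException rather than a real token secret manager:

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class CausePreservationExample {

  public static void main(String[] args) {
    try {
      try {
        throw new IOException("simulated token-renewer startup failure");
      } catch (IOException ie) {
        // Same wrapping used in serviceStart above.
        throw new YarnRuntimeException("Failed to start secret manager threads", ie);
      }
    } catch (YarnRuntimeException e) {
      // The checked cause is still available for logging or recovery decisions.
      System.out.println(e.getMessage() + " caused by " + e.getCause());
    }
  }
}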
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class NodesListManager, method disableHostsFileReader.
private void disableHostsFileReader(Exception ex) {
  LOG.warn("Failed to init hostsReader, disabling", ex);
  try {
    this.includesFile =
        conf.get(YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH);
    this.excludesFile =
        conf.get(YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
    this.hostsReader =
        createHostsFileReader(this.includesFile, this.excludesFile);
    setDecomissionedNMs();
  } catch (IOException ioe2) {
    // Should *never* happen
    this.hostsReader = null;
    throw new YarnRuntimeException(ioe2);
  } catch (YarnException e) {
    // Should *never* happen
    this.hostsReader = null;
    throw new YarnRuntimeException(e);
  }
}
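Both catch blocks clear the reader and rethrow the cause as a YarnRuntimeException. A sketch of the same wrap pattern written with a Java 7 multi-catch (the Hadoop source keeps the two separate catch blocks; loadHostsReader is a hypothetical stand-in for createHostsFileReader plus setDecomissionedNMs):

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class MultiCatchWrapExample {

  private Object hostsReader;

  // Hypothetical loader standing in for createHostsFileReader + setDecomissionedNMs.
  private Object loadHostsReader() throws IOException, YarnException {
    return new Object();
  }

  public void reload() {
    try {
      hostsReader = loadHostsReader();
    } catch (IOException | YarnException e) {
      // Should *never* happen: clear the reader and rethrow as unchecked.
      hostsReader = null;
      throw new YarnRuntimeException(e);
    }
  }
}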
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
The class ResourceManager, method createApplicationMasterService.
protected ApplicationMasterService createApplicationMasterService() {
  Configuration config = this.rmContext.getYarnConfiguration();
  if (YarnConfiguration.isOpportunisticContainerAllocationEnabled(config)
      || YarnConfiguration.isDistSchedulingEnabled(config)) {
    if (YarnConfiguration.isDistSchedulingEnabled(config)
        && !YarnConfiguration.isOpportunisticContainerAllocationEnabled(config)) {
      throw new YarnRuntimeException(
          "Invalid parameters: opportunistic container allocation has to "
              + "be enabled when distributed scheduling is enabled.");
    }
    OpportunisticContainerAllocatorAMService oppContainerAllocatingAMService =
        new OpportunisticContainerAllocatorAMService(this.rmContext, scheduler);
    EventDispatcher oppContainerAllocEventDispatcher = new EventDispatcher(
        oppContainerAllocatingAMService,
        OpportunisticContainerAllocatorAMService.class.getName());
    // Add an event dispatcher for the
    // OpportunisticContainerAllocatorAMService to handle node
    // additions, updates and removals. Since the SchedulerEvent is currently
    // a super set of these, we register interest for it.
    addService(oppContainerAllocEventDispatcher);
    rmDispatcher.register(SchedulerEventType.class,
        oppContainerAllocEventDispatcher);
    this.rmContext.setContainerQueueLimitCalculator(
        oppContainerAllocatingAMService.getNodeManagerQueueLimitCalculator());
    return oppContainerAllocatingAMService;
  }
  return new ApplicationMasterService(this.rmContext, scheduler);
}
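The nested if enforces that distributed scheduling is only valid when opportunistic container allocation is also enabled. A minimal sketch of the same guard-clause pattern with plain boolean flags standing in for the two YarnConfiguration checks:

import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class ConfigGuardExample {

  // Hypothetical flags standing in for the two YarnConfiguration checks.
  static void validate(boolean opportunisticEnabled, boolean distSchedulingEnabled) {
    if (distSchedulingEnabled && !opportunisticEnabled) {
      throw new YarnRuntimeException(
          "Invalid parameters: opportunistic container allocation has to "
              + "be enabled when distributed scheduling is enabled.");
    }
  }

  public static void main(String[] args) {
    validate(true, true); // valid: both enabled
    try {
      validate(false, true); // invalid: distributed scheduling without opportunistic allocation
    } catch (YarnRuntimeException e) {
      System.out.println("Rejected: " + e.getMessage());
    }
  }
}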