Example 16 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

the class LogAggregationService method initAppAggregator.

protected void initAppAggregator(final ApplicationId appId, String user, Credentials credentials, Map<ApplicationAccessType, String> appAcls, LogAggregationContext logAggregationContext, long recoveredLogInitedTime) {
    // Get user's FileSystem credentials
    final UserGroupInformation userUgi = UserGroupInformation.createRemoteUser(user);
    if (credentials != null) {
        userUgi.addCredentials(credentials);
    }
    // New application
    final AppLogAggregator appLogAggregator = new AppLogAggregatorImpl(
            this.dispatcher, this.deletionService, getConfig(), appId, userUgi,
            this.nodeId, dirsHandler, getRemoteNodeLogFileForApp(appId, user),
            appAcls, logAggregationContext, this.context,
            getLocalFileContext(getConfig()), this.rollingMonitorInterval,
            recoveredLogInitedTime);
    if (this.appLogAggregators.putIfAbsent(appId, appLogAggregator) != null) {
        throw new YarnRuntimeException("Duplicate initApp for " + appId);
    }
    // Only create the app dir once the duplicate-aggregator check above has passed
    YarnRuntimeException appDirException = null;
    try {
        // Create the app dir
        createAppDir(user, appId, userUgi);
    } catch (Exception e) {
        appLogAggregator.disableLogAggregation();
        if (!(e instanceof YarnRuntimeException)) {
            appDirException = new YarnRuntimeException(e);
        } else {
            appDirException = (YarnRuntimeException) e;
        }
        appLogAggregators.remove(appId);
        closeFileSystems(userUgi);
        throw appDirException;
    }
    // TODO Get the user configuration for the list of containers that need log
    // aggregation.
    // Schedule the aggregator.
    Runnable aggregatorWrapper = new Runnable() {

        @Override
        public void run() {
            try {
                appLogAggregator.run();
            } finally {
                appLogAggregators.remove(appId);
                closeFileSystems(userUgi);
            }
        }
    };
    this.threadPool.execute(aggregatorWrapper);
}
Also used : YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), IOException (java.io.IOException), FileNotFoundException (java.io.FileNotFoundException), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
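
The important detail in this example is the putIfAbsent guard: a second initApp for the same ApplicationId is treated as a programming error and surfaced as an unchecked YarnRuntimeException, while checked failures from directory creation are wrapped before being rethrown. A minimal, self-contained sketch of the same guard pattern follows; DuplicateGuardExample and its register method are illustrative names, not Hadoop classes.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class DuplicateGuardExample {

    private final ConcurrentMap<String, Runnable> handlers = new ConcurrentHashMap<>();

    public void register(String appId, Runnable handler) {
        // putIfAbsent returns the previously mapped value when one exists,
        // so a non-null result means this appId was already registered.
        if (handlers.putIfAbsent(appId, handler) != null) {
            throw new YarnRuntimeException("Duplicate initApp for " + appId);
        }
    }
}

Using putIfAbsent keeps the check-and-register step atomic, which is why neither the sketch nor the Hadoop code needs an explicit lock around the duplicate check.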

Example 17 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

the class ResourceLocalizationService method serviceInit.

@Override
public void serviceInit(Configuration conf) throws Exception {
    this.validateConf(conf);
    this.publicRsrc = new LocalResourcesTrackerImpl(null, null, dispatcher, true, conf, stateStore, dirsHandler);
    this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
    try {
        lfs = getLocalFileContext(conf);
        lfs.setUMask(new FsPermission((short) FsPermission.DEFAULT_UMASK));
        if (!stateStore.canRecover() || stateStore.isNewlyCreated()) {
            cleanUpLocalDirs(lfs, delService);
            cleanupLogDirs(lfs, delService);
            initializeLocalDirs(lfs);
            initializeLogDirs(lfs);
        }
    } catch (Exception e) {
        throw new YarnRuntimeException("Failed to initialize LocalizationService", e);
    }
    diskValidator = DiskValidatorFactory.getInstance(conf.get(YarnConfiguration.DISK_VALIDATOR));
    LOG.info("Disk Validator: " + YarnConfiguration.DISK_VALIDATOR + " is loaded.");
    cacheTargetSize = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_TARGET_SIZE_MB,
            YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB) << 20;
    cacheCleanupPeriod = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS,
            YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS);
    localizationServerAddress = conf.getSocketAddr(YarnConfiguration.NM_BIND_HOST,
            YarnConfiguration.NM_LOCALIZER_ADDRESS,
            YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
            YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT);
    localizerTracker = createLocalizerTracker(conf);
    addService(localizerTracker);
    dispatcher.register(LocalizerEventType.class, localizerTracker);
    localDirsChangeListener = new DirsChangeListener() {

        @Override
        public void onDirsChanged() {
            checkAndInitializeLocalDirs();
        }
    };
    logDirsChangeListener = new DirsChangeListener() {

        @Override
        public void onDirsChanged() {
            initializeLogDirs(lfs);
        }
    };
    super.serviceInit(conf);
}
Also used : YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), DirsChangeListener (org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.DirsChangeListener), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CancellationException (java.util.concurrent.CancellationException), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), URISyntaxException (java.net.URISyntaxException), FileNotFoundException (java.io.FileNotFoundException), RejectedExecutionException (java.util.concurrent.RejectedExecutionException)
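
The pattern here is to fold every checked failure during service initialization into a single unchecked YarnRuntimeException, so the service framework only sees one failure type. Below is a hedged sketch of that wrap-and-rethrow idiom against the real AbstractService base class; ExampleLocalizingService and prepareLocalDirs are hypothetical names, not Hadoop APIs.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class ExampleLocalizingService extends AbstractService {

    public ExampleLocalizingService() {
        super(ExampleLocalizingService.class.getName());
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        try {
            prepareLocalDirs(conf);
        } catch (IOException e) {
            // Convert the checked setup failure into the unchecked type,
            // keeping the original exception as the cause.
            throw new YarnRuntimeException("Failed to initialize ExampleLocalizingService", e);
        }
        super.serviceInit(conf);
    }

    private void prepareLocalDirs(Configuration conf) throws IOException {
        // placeholder for real directory setup
    }
}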

Example 18 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

the class RMSecretManagerService method serviceStart.

@Override
public void serviceStart() throws Exception {
    amRmTokenSecretManager.start();
    containerTokenSecretManager.start();
    nmTokenSecretManager.start();
    try {
        rmDTSecretManager.startThreads();
    } catch (IOException ie) {
        throw new YarnRuntimeException("Failed to start secret manager threads", ie);
    }
    super.serviceStart();
}
Also used : YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), IOException (java.io.IOException)
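
Because serviceStart converts the IOException into an unchecked YarnRuntimeException, a caller that wants to degrade gracefully catches it around the lifecycle calls rather than handling a checked exception. The sketch below shows that caller side; StartWithFallback and tryStart are invented names, and it only assumes the standard org.apache.hadoop.service.Service interface.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public final class StartWithFallback {

    private StartWithFallback() {
    }

    public static boolean tryStart(Service service, Configuration conf) {
        try {
            service.init(conf);
            service.start();
            return true;
        } catch (YarnRuntimeException e) {
            // The secret-manager example above rethrows its IOException this way,
            // so the original cause is still reachable via e.getCause().
            System.err.println("Failed to start " + service.getName() + ": " + e);
            return false;
        }
    }
}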

Example 19 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

the class NodesListManager method disableHostsFileReader.

private void disableHostsFileReader(Exception ex) {
    LOG.warn("Failed to init hostsReader, disabling", ex);
    try {
        this.includesFile = conf.get(YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH);
        this.excludesFile = conf.get(YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
        this.hostsReader = createHostsFileReader(this.includesFile, this.excludesFile);
        setDecomissionedNMs();
    } catch (IOException ioe2) {
        // Should *never* happen
        this.hostsReader = null;
        throw new YarnRuntimeException(ioe2);
    } catch (YarnException e) {
        // Should *never* happen
        this.hostsReader = null;
        throw new YarnRuntimeException(e);
    }
}
Also used : YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), IOException (java.io.IOException), YarnException (org.apache.hadoop.yarn.exceptions.YarnException)
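
Both catch blocks make the same statement: reloading the built-in default host lists is expected to always succeed, so any failure is converted into a YarnRuntimeException instead of being propagated as a checked exception. A compact hypothetical sketch of that fallback pattern follows (HostsFallbackExample and loadHostLists are invented names).

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class HostsFallbackExample {

    public void reloadWithDefaults() {
        try {
            // empty paths stand in for the built-in defaults
            loadHostLists("", "");
        } catch (IOException | YarnException e) {
            // Loading the defaults should never fail, so surface any failure
            // as an unchecked error rather than forcing callers to handle it.
            throw new YarnRuntimeException(e);
        }
    }

    private void loadHostLists(String includes, String excludes)
            throws IOException, YarnException {
        // placeholder for real hosts-file parsing
    }
}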

Example 20 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

the class ResourceManager method createApplicationMasterService.

protected ApplicationMasterService createApplicationMasterService() {
    Configuration config = this.rmContext.getYarnConfiguration();
    if (YarnConfiguration.isOpportunisticContainerAllocationEnabled(config) || YarnConfiguration.isDistSchedulingEnabled(config)) {
        if (YarnConfiguration.isDistSchedulingEnabled(config) && !YarnConfiguration.isOpportunisticContainerAllocationEnabled(config)) {
            throw new YarnRuntimeException("Invalid parameters: opportunistic container allocation has to " + "be enabled when distributed scheduling is enabled.");
        }
        OpportunisticContainerAllocatorAMService oppContainerAllocatingAMService = new OpportunisticContainerAllocatorAMService(this.rmContext, scheduler);
        EventDispatcher oppContainerAllocEventDispatcher = new EventDispatcher(oppContainerAllocatingAMService, OpportunisticContainerAllocatorAMService.class.getName());
        // Add an event dispatcher for the
        // OpportunisticContainerAllocatorAMService to handle node
        // additions, updates and removals. Since the SchedulerEvent is currently
        // a superset of these, we register interest for it.
        addService(oppContainerAllocEventDispatcher);
        rmDispatcher.register(SchedulerEventType.class, oppContainerAllocEventDispatcher);
        this.rmContext.setContainerQueueLimitCalculator(oppContainerAllocatingAMService.getNodeManagerQueueLimitCalculator());
        return oppContainerAllocatingAMService;
    }
    return new ApplicationMasterService(this.rmContext, scheduler);
}
Also used : YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), EventDispatcher (org.apache.hadoop.yarn.event.EventDispatcher), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)
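
This last example uses YarnRuntimeException for configuration validation: an inconsistent combination of settings fails fast with a descriptive message instead of producing a misconfigured service. The sketch below distills that early-validation idea; the class name and the example.* property keys are illustrative, not real YARN configuration keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public final class ConfigGuardExample {

    private ConfigGuardExample() {
    }

    public static void validate(Configuration conf) {
        // Hypothetical property names used only for illustration.
        boolean distScheduling = conf.getBoolean("example.dist-scheduling.enabled", false);
        boolean oppContainers = conf.getBoolean("example.opportunistic-containers.enabled", false);
        if (distScheduling && !oppContainers) {
            // Reject the inconsistent pair before any service is built.
            throw new YarnRuntimeException(
                "Invalid parameters: opportunistic container allocation has to "
                + "be enabled when distributed scheduling is enabled.");
        }
    }
}

Throwing during construction of the service, as the ResourceManager does above, makes the misconfiguration visible at startup rather than later when containers are requested.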

Aggregations

YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException): 147 usages
IOException (java.io.IOException): 56 usages
Configuration (org.apache.hadoop.conf.Configuration): 38 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 28 usages
Test (org.junit.Test): 28 usages
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 17 usages
InetSocketAddress (java.net.InetSocketAddress): 12 usages
Path (org.apache.hadoop.fs.Path): 12 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9 usages
InvocationTargetException (java.lang.reflect.InvocationTargetException): 8 usages
Server (org.apache.hadoop.ipc.Server): 8 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 7 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 7 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 7 usages
FileNotFoundException (java.io.FileNotFoundException): 6 usages
ArrayList (java.util.ArrayList): 6 usages
HashMap (java.util.HashMap): 6 usages
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 6 usages
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 6 usages
ConnectException (java.net.ConnectException): 5 usages