
Example 6 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class MemoryRMStateStore, method updateApplicationAttemptStateInternal.

@Override
public synchronized void updateApplicationAttemptStateInternal(ApplicationAttemptId appAttemptId, ApplicationAttemptStateData attemptState) throws Exception {
    ApplicationStateData appState = state.getApplicationState().get(appAttemptId.getApplicationId());
    if (appState == null) {
        throw new YarnRuntimeException("Application doesn't exist");
    }
    LOG.info("Updating final state " + attemptState.getState() + " for attempt: " + attemptState.getAttemptId());
    appState.attempts.put(attemptState.getAttemptId(), attemptState);
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), ApplicationStateData (org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData)
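
As a rough illustration of how that guard behaves, the hypothetical test sketch below (the method name and setup are assumptions, not Hadoop code) calls the method for an application that was never stored and expects the exception above; it relies on MemoryRMStateStore starting out with an empty in-memory RMState:

// Hypothetical sketch (not from the Hadoop sources): updating an attempt of an
// application that was never stored hits the guard above and throws
// YarnRuntimeException before the attempt data is ever touched.
@Test
public void testUpdateUnknownAttemptSketch() throws Exception {
    MemoryRMStateStore store = new MemoryRMStateStore();
    ApplicationAttemptId unknownAttempt = ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(System.currentTimeMillis(), 1), 1);
    ApplicationAttemptStateData attemptState = mock(ApplicationAttemptStateData.class);
    try {
        store.updateApplicationAttemptStateInternal(unknownAttempt, attemptState);
        Assert.fail("Expected YarnRuntimeException for an unknown application");
    } catch (YarnRuntimeException e) {
        // expected: "Application doesn't exist"
    }
}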

Example 7 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class MiniMRYarnCluster, method serviceInit.

@Override
public void serviceInit(Configuration conf) throws Exception {
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    if (conf.get(MRJobConfig.MR_AM_STAGING_DIR) == null) {
        conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(getTestWorkDir(), "apps_staging_dir/").getAbsolutePath());
    }
    // By default, VMEM monitoring disabled, PMEM monitoring enabled.
    if (!conf.getBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING, MRConfig.DEFAULT_MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
        conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
        conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    }
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000");
    try {
        Path stagingPath = FileContext.getFileContext(conf).makeQualified(new Path(conf.get(MRJobConfig.MR_AM_STAGING_DIR)));
        /*
       * Re-configure the staging path on Windows if the file system is localFs.
       * We need to use an absolute path that contains the drive letter. The unit
       * test could run on a different drive than the AM. We can run into the
       * issue that job files are localized to the drive where the test runs on,
       * while the AM starts on a different drive and fails to find the job
       * metafiles. Using an absolute path avoids this ambiguity.
       */
        if (Path.WINDOWS) {
            if (LocalFileSystem.class.isInstance(stagingPath.getFileSystem(conf))) {
                conf.set(MRJobConfig.MR_AM_STAGING_DIR, new File(conf.get(MRJobConfig.MR_AM_STAGING_DIR)).getAbsolutePath());
            }
        }
        FileContext fc = FileContext.getFileContext(stagingPath.toUri(), conf);
        if (fc.util().exists(stagingPath)) {
            LOG.info(stagingPath + " exists! deleting...");
            fc.delete(stagingPath, true);
        }
        LOG.info("mkdir: " + stagingPath);
        // mkdir the staging directory so that the right permissions are set while running as a proxy user
        fc.mkdir(stagingPath, null, true);
        // mkdir the done directory as well
        String doneDir = JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
        Path doneDirPath = fc.makeQualified(new Path(doneDir));
        fc.mkdir(doneDirPath, null, true);
    } catch (IOException e) {
        throw new YarnRuntimeException("Could not create staging directory. ", e);
    }
    // The default is local, because of which shuffle doesn't happen
    conf.set(MRConfig.MASTER_ADDRESS, "test");
    // configure the shuffle service in the NM
    String[] nmAuxServices = conf.getStrings(YarnConfiguration.NM_AUX_SERVICES);
    // check whether TIMELINE_AUX_SERVICE_NAME needs to be enabled
    boolean enableTimelineAuxService = false;
    if (nmAuxServices != null) {
        for (String nmAuxService : nmAuxServices) {
            if (nmAuxService.equals(TIMELINE_AUX_SERVICE_NAME)) {
                enableTimelineAuxService = true;
                break;
            }
        }
    }
    if (enableTimelineAuxService) {
        conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, TIMELINE_AUX_SERVICE_NAME });
    } else {
        conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID });
    }
    conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID), ShuffleHandler.class, Service.class);
    // Non-standard shuffle port (0 lets the handler bind to a free ephemeral port)
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    conf.setClass(YarnConfiguration.NM_CONTAINER_EXECUTOR, DefaultContainerExecutor.class, ContainerExecutor.class);
    // TestMRJobs is for testing non-uberized operation only; see TestUberAM
    // for corresponding uberized tests.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    super.serviceInit(conf);
}
Also used: Path (org.apache.hadoop.fs.Path), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), IOException (java.io.IOException), File (java.io.File), FileContext (org.apache.hadoop.fs.FileContext)
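
For orientation, a minimal hypothetical driver for this serviceInit path might look like the sketch below; the cluster name is illustrative and error handling is omitted:

// Hypothetical sketch: spinning up the mini cluster drives the serviceInit
// above, and any failure to create the staging directory surfaces as a
// YarnRuntimeException from the IOException catch block.
MiniMRYarnCluster miniCluster = new MiniMRYarnCluster("example-cluster");
Configuration conf = new Configuration();
try {
    miniCluster.init(conf);   // invokes the serviceInit(conf) shown above
    miniCluster.start();
    // ... submit test jobs against miniCluster.getConfig() ...
} finally {
    miniCluster.stop();
}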

Example 8 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class TestAsyncDispatcher, method testDispatcherOnCloseIfQueueEmpty.

/* This test checks that the dispatcher does not hang on close if the
   * following two things happen:
   * 1. A thread which was putting an event on the event queue is interrupted.
   * 2. The event queue is empty on close.
   */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test(timeout = 10000)
public void testDispatcherOnCloseIfQueueEmpty() throws Exception {
    BlockingQueue<Event> eventQueue = spy(new LinkedBlockingQueue<Event>());
    Event event = mock(Event.class);
    doThrow(new InterruptedException()).when(eventQueue).put(event);
    DrainDispatcher disp = new DrainDispatcher(eventQueue);
    disp.init(new Configuration());
    disp.setDrainEventsOnStop();
    disp.start();
    // Wait for event handler thread to start and begin waiting for events.
    disp.waitForEventThreadToWait();
    try {
        disp.getEventHandler().handle(event);
        Assert.fail("Expected YarnRuntimeException");
    } catch (YarnRuntimeException e) {
        Assert.assertTrue(e.getCause() instanceof InterruptedException);
    }
    // Queue should be empty and dispatcher should not hang on close
    Assert.assertTrue("Event Queue should have been empty", eventQueue.isEmpty());
    disp.close();
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), Test (org.junit.Test)
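
The behaviour being asserted comes from the dispatcher's event handler wrapping an InterruptedException in a YarnRuntimeException; the code below is a simplified sketch of that pattern (the class name is made up and it is not a verbatim copy of AsyncDispatcher's inner handler):

// Simplified sketch of the pattern the test exercises: a blocking put that is
// interrupted gets rethrown as a YarnRuntimeException carrying the original
// InterruptedException as its cause.
class QueueingHandler implements EventHandler<Event> {
    private final BlockingQueue<Event> eventQueue;

    QueueingHandler(BlockingQueue<Event> eventQueue) {
        this.eventQueue = eventQueue;
    }

    @Override
    public void handle(Event event) {
        try {
            eventQueue.put(event);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            throw new YarnRuntimeException(e);  // what the test asserts on
        }
    }
}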

Example 9 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class TestRPCFactories, method testPbServerFactory.

private void testPbServerFactory() {
    InetSocketAddress addr = new InetSocketAddress(0);
    Configuration conf = new Configuration();
    ApplicationMasterProtocol instance = new AMRMProtocolTestImpl();
    Server server = null;
    try {
        server = RpcServerFactoryPBImpl.get().getServer(ApplicationMasterProtocol.class, instance, addr, conf, null, 1);
        server.start();
    } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to create server");
    } finally {
        if (server != null) {
            server.stop();
        }
    }
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), Configuration (org.apache.hadoop.conf.Configuration), Server (org.apache.hadoop.ipc.Server), InetSocketAddress (java.net.InetSocketAddress), ApplicationMasterProtocol (org.apache.hadoop.yarn.api.ApplicationMasterProtocol)
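
The client-side factory follows the same catch pattern; the sketch below is hypothetical (method name and argument details are assumed, modeled on the server-side call above) and simply mirrors how a YarnRuntimeException from the PB client factory would be reported:

// Hypothetical sketch of the client-side counterpart: factory failures are
// reported as YarnRuntimeException here as well.
private void testPbClientFactorySketch() {
    InetSocketAddress addr = new InetSocketAddress(0);
    Configuration conf = new Configuration();
    try {
        RpcClientFactoryPBImpl.get().getClient(
            ApplicationMasterProtocol.class, 1, addr, conf);
    } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to create client");
    }
}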

Example 10 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class TestRecordFactory, method testPbRecordFactory.

@Test
public void testPbRecordFactory() {
    RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
    try {
        AllocateResponse response = pbRecordFactory.newRecordInstance(AllocateResponse.class);
        Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
    } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to crete record");
    }
    try {
        AllocateRequest response = pbRecordFactory.newRecordInstance(AllocateRequest.class);
        Assert.assertEquals(AllocateRequestPBImpl.class, response.getClass());
    } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to crete record");
    }
}
Also used: AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), RecordFactory (org.apache.hadoop.yarn.factories.RecordFactory), AllocateRequest (org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest), Test (org.junit.Test)
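
To see the exception path these assertions guard against, a hedged sketch (NoSuchRecord is a made-up interface, assumed to live in an ordinary test package) asks the factory for a record type that has no protobuf implementation:

// Hypothetical sketch: NoSuchRecord has no generated NoSuchRecordPBImpl, so the
// PB record factory cannot resolve an implementation class and wraps the
// failure in a YarnRuntimeException instead of returning an instance.
interface NoSuchRecord {
}

@Test
public void testMissingPbImplSketch() {
    RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
    try {
        pbRecordFactory.newRecordInstance(NoSuchRecord.class);
        Assert.fail("Expected YarnRuntimeException for a record without a PBImpl");
    } catch (YarnRuntimeException e) {
        // expected: the factory could not load a matching PBImpl class
    }
}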

Aggregations

YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException): 147
IOException (java.io.IOException): 56
Configuration (org.apache.hadoop.conf.Configuration): 38
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 28
Test (org.junit.Test): 28
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 17
InetSocketAddress (java.net.InetSocketAddress): 12
Path (org.apache.hadoop.fs.Path): 12
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9
InvocationTargetException (java.lang.reflect.InvocationTargetException): 8
Server (org.apache.hadoop.ipc.Server): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 7
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 7
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 7
FileNotFoundException (java.io.FileNotFoundException): 6
ArrayList (java.util.ArrayList): 6
HashMap (java.util.HashMap): 6
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 6
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 6
ConnectException (java.net.ConnectException): 5
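
The counts above suggest the dominant idiom is wrapping checked exceptions (most often IOException) from configuration or setup work into the unchecked YarnRuntimeException, much as Example 7 does. A minimal, hypothetical sketch of that idiom (helper name and arguments are assumptions):

// Hypothetical sketch of the common idiom reflected in the aggregation counts:
// checked IOExceptions from setup work are rethrown as the unchecked
// YarnRuntimeException so service/init callers don't have to declare them.
void ensureDirExists(FileContext fc, Path dir) {
    try {
        if (!fc.util().exists(dir)) {
            fc.mkdir(dir, null, true);   // create parent directories as needed
        }
    } catch (IOException e) {
        throw new YarnRuntimeException("Could not create directory " + dir, e);
    }
}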