
Example 91 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class TestYSCRPCFactories, method testPbClientFactory.

private void testPbClientFactory() {
    InetSocketAddress addr = new InetSocketAddress(0);
    System.err.println(addr.getHostName() + addr.getPort());
    Configuration conf = new Configuration();
    ResourceTracker instance = new ResourceTrackerTestImpl();
    Server server = null;
    try {
        server = RpcServerFactoryPBImpl.get().getServer(ResourceTracker.class, instance, addr, conf, null, 1);
        server.start();
        System.err.println(server.getListenerAddress());
        System.err.println(NetUtils.getConnectAddress(server));
        ResourceTracker client = null;
        try {
            client = (ResourceTracker) RpcClientFactoryPBImpl.get().getClient(ResourceTracker.class, 1, NetUtils.getConnectAddress(server), conf);
        } catch (YarnRuntimeException e) {
            e.printStackTrace();
            Assert.fail("Failed to create client");
        }
    } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to create server");
    } finally {
        // guard against a NullPointerException masking the assertion when server creation failed
        if (server != null) {
            server.stop();
        }
    }
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), Configuration (org.apache.hadoop.conf.Configuration), Server (org.apache.hadoop.ipc.Server), InetSocketAddress (java.net.InetSocketAddress), ResourceTracker (org.apache.hadoop.yarn.server.api.ResourceTracker)
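
Not from the Hadoop sources, but a minimal sketch of the convention this test relies on: the YARN factories and services wrap checked failures (IOException, UnknownHostException, reflection errors) in the unchecked YarnRuntimeException, so test code only needs the single catch blocks shown above. The helper name resolveAddress and its body are hypothetical; it assumes java.net.InetAddress and java.net.UnknownHostException are imported.

private InetSocketAddress resolveAddress(String host, int port) {
    try {
        return new InetSocketAddress(InetAddress.getByName(host), port);
    } catch (UnknownHostException e) {
        // wrap the checked exception; the (String, Throwable) constructor keeps the cause attached
        throw new YarnRuntimeException("Could not resolve host " + host, e);
    }
}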

Example 92 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class TestYSCRecordFactory, method testPbRecordFactory.

@Test
public void testPbRecordFactory() {
    RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
    try {
        NodeHeartbeatRequest request = pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
        Assert.assertEquals(NodeHeartbeatRequestPBImpl.class, request.getClass());
    } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to crete record");
    }
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), RecordFactory (org.apache.hadoop.yarn.factories.RecordFactory), NodeHeartbeatRequest (org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest), Test (org.junit.Test)
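
A hedged companion sketch of the failure path this test guards against: RecordFactoryPBImpl resolves a *PBImpl class for the requested record interface by reflection, and when no such class can be found it surfaces a YarnRuntimeException. UnknownRecord below is a hypothetical interface, not a real YARN record.

public interface UnknownRecord {
    // hypothetical record interface with no generated UnknownRecordPBImpl class
}

@Test
public void testMissingPbImpl() {
    RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
    try {
        pbRecordFactory.newRecordInstance(UnknownRecord.class);
        Assert.fail("Expected a YarnRuntimeException for a record without a PB implementation");
    } catch (YarnRuntimeException e) {
        // expected: the factory could not locate or instantiate the implementation class
    }
}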

Example 93 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class LocalDirsHandlerService, method serviceInit.

/**
   * Initializes the disk health-check timer task and its check interval.
   */
@Override
protected void serviceInit(Configuration config) throws Exception {
    // Clone the configuration as we may do modifications to dirs-list
    Configuration conf = new Configuration(config);
    diskHealthCheckInterval = conf.getLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS);
    monitoringTimerTask = new MonitoringTimerTask(conf);
    isDiskHealthCheckerEnabled = conf.getBoolean(YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, true);
    minNeededHealthyDisksFactor = conf.getFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, YarnConfiguration.DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION);
    lastDisksCheckTime = System.currentTimeMillis();
    super.serviceInit(conf);
    FileContext localFs;
    try {
        localFs = FileContext.getLocalFSFileContext(config);
    } catch (IOException e) {
        throw new YarnRuntimeException("Unable to get the local filesystem", e);
    }
    FsPermission perm = new FsPermission((short) 0755);
    boolean createSucceeded = localDirs.createNonExistentDirs(localFs, perm);
    createSucceeded &= logDirs.createNonExistentDirs(localFs, perm);
    if (!createSucceeded) {
        updateDirsAfterTest();
    }
    // Check the disk health immediately to weed out bad directories
    // before other init code attempts to use them.
    checkDirs();
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), IOException (java.io.IOException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), FileContext (org.apache.hadoop.fs.FileContext)
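
A minimal usage sketch of the configuration keys serviceInit reads above; the interval and fraction values are illustrative only, not recommendations. Calling init() runs serviceInit and therefore propagates the YarnRuntimeException if the local filesystem cannot be obtained.

YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, true);
// illustrative values: check every two minutes, require at least 25% healthy directories
conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 2 * 60 * 1000L);
conf.setFloat(YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION, 0.25f);

LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
// init() invokes serviceInit(conf); a failure to get the local FileContext
// surfaces as the YarnRuntimeException thrown in the snippet above
dirsHandler.init(conf);
dirsHandler.start();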

Example 94 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class AMRMProxyService, method processApplicationStartRequest.

/**
   * Callback from the ContainerManager implementation for initializing the
   * application request processing pipeline.
   *
   * @param request - encapsulates information for starting an AM
   * @throws IOException
   * @throws YarnException
   */
public void processApplicationStartRequest(StartContainerRequest request) throws IOException, YarnException {
    LOG.info("Callback received for initializing request " + "processing pipeline for an AM");
    ContainerTokenIdentifier containerTokenIdentifierForKey = BuilderUtils.newContainerTokenIdentifier(request.getContainerToken());
    ApplicationAttemptId appAttemptId = containerTokenIdentifierForKey.getContainerID().getApplicationAttemptId();
    Credentials credentials = YarnServerSecurityUtils.parseCredentials(request.getContainerLaunchContext());
    Token<AMRMTokenIdentifier> amrmToken = getFirstAMRMToken(credentials.getAllTokens());
    if (amrmToken == null) {
        throw new YarnRuntimeException("AMRMToken not found in the start container request for application:" + appAttemptId.toString());
    }
    // Substitute the existing AMRM Token with a local one. Keep the rest of the
    // tokens in the credentials intact.
    Token<AMRMTokenIdentifier> localToken = this.secretManager.createAndGetAMRMToken(appAttemptId);
    credentials.addToken(localToken.getService(), localToken);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    request.getContainerLaunchContext().setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
    initializePipeline(containerTokenIdentifierForKey.getContainerID().getApplicationAttemptId(), containerTokenIdentifierForKey.getApplicationSubmitter(), amrmToken, localToken);
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), AMRMTokenIdentifier (org.apache.hadoop.yarn.security.AMRMTokenIdentifier), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Credentials (org.apache.hadoop.security.Credentials), ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier)
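
getFirstAMRMToken is referenced above but not shown; the following is a hedged sketch of a plausible implementation, scanning the credentials for a token whose kind matches AMRMTokenIdentifier.KIND_NAME. The signature and generics here are assumptions; Token and TokenIdentifier come from org.apache.hadoop.security.token, and the parameter is what Credentials.getAllTokens() returns.

private Token<AMRMTokenIdentifier> getFirstAMRMToken(Collection<Token<? extends TokenIdentifier>> allTokens) {
    for (Token<? extends TokenIdentifier> token : allTokens) {
        if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())) {
            @SuppressWarnings("unchecked")
            Token<AMRMTokenIdentifier> amrmToken = (Token<AMRMTokenIdentifier>) token;
            // return the first matching token; callers treat null as "not found"
            return amrmToken;
        }
    }
    // a null result is what triggers the YarnRuntimeException in the method above
    return null;
}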

Example 95 with YarnRuntimeException

Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class AMRMProxyService, method createRequestInterceptorChain.

/**
   * Creates and returns a reference to the first interceptor in the chain of
   * request interceptor instances.
   *
   * @return the reference to the first interceptor in the chain
   */
protected RequestInterceptor createRequestInterceptorChain() {
    Configuration conf = getConfig();
    List<String> interceptorClassNames = getInterceptorClassNames(conf);
    RequestInterceptor pipeline = null;
    RequestInterceptor current = null;
    for (String interceptorClassName : interceptorClassNames) {
        try {
            Class<?> interceptorClass = conf.getClassByName(interceptorClassName);
            if (RequestInterceptor.class.isAssignableFrom(interceptorClass)) {
                RequestInterceptor interceptorInstance = (RequestInterceptor) ReflectionUtils.newInstance(interceptorClass, conf);
                if (pipeline == null) {
                    pipeline = interceptorInstance;
                    current = interceptorInstance;
                    continue;
                } else {
                    current.setNextInterceptor(interceptorInstance);
                    current = interceptorInstance;
                }
            } else {
                throw new YarnRuntimeException("Class: " + interceptorClassName + " not instance of " + RequestInterceptor.class.getCanonicalName());
            }
        } catch (ClassNotFoundException e) {
            throw new YarnRuntimeException("Could not instantiate ApplicationMasterRequestInterceptor: " + interceptorClassName, e);
        }
    }
    if (pipeline == null) {
        throw new YarnRuntimeException("RequestInterceptor pipeline is not configured in the system");
    }
    return pipeline;
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)
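
For readers unfamiliar with the pattern, the same head/tail wiring can be written against plain Java types. The sketch below is a generic, hypothetical rendering (Interceptor, buildChain, and the exception types are stand-ins, not YARN APIs) of how the first configured class becomes the pipeline head and each later instance is linked to its predecessor, with misconfiguration reported much like the YarnRuntimeExceptions above. It assumes java.util.List is imported.

interface Interceptor {
    void setNext(Interceptor next);
}

static Interceptor buildChain(List<String> classNames) {
    Interceptor head = null;
    Interceptor tail = null;
    for (String name : classNames) {
        try {
            Class<?> clazz = Class.forName(name);
            if (!Interceptor.class.isAssignableFrom(clazz)) {
                throw new IllegalArgumentException(name + " is not an Interceptor");
            }
            Interceptor next = (Interceptor) clazz.getDeclaredConstructor().newInstance();
            if (head == null) {
                // the first instance becomes the head of the pipeline
                head = next;
            } else {
                // link the new instance onto the current tail
                tail.setNext(next);
            }
            tail = next;
        } catch (ReflectiveOperationException e) {
            throw new IllegalStateException("Could not instantiate interceptor " + name, e);
        }
    }
    if (head == null) {
        throw new IllegalStateException("No interceptors configured");
    }
    return head;
}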

Aggregations

YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException): 147
IOException (java.io.IOException): 56
Configuration (org.apache.hadoop.conf.Configuration): 38
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 28
Test (org.junit.Test): 28
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 17
InetSocketAddress (java.net.InetSocketAddress): 12
Path (org.apache.hadoop.fs.Path): 12
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9
InvocationTargetException (java.lang.reflect.InvocationTargetException): 8
Server (org.apache.hadoop.ipc.Server): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 7
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 7
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 7
FileNotFoundException (java.io.FileNotFoundException): 6
ArrayList (java.util.ArrayList): 6
HashMap (java.util.HashMap): 6
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 6
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 6
ConnectException (java.net.ConnectException): 5