Search in sources:

Example 1 with LlapDaemonConfiguration

Use of org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration in the Apache Hive project.

From the class LlapItUtils, method startAndGetMiniLlapCluster:

/**
 * Starts a {@link MiniLlapCluster} for tests and returns it.
 * <p>
 * If {@code confDir} is non-empty, its {@code tez-site.xml} is added as a
 * resource to {@code conf} before the daemon configuration is derived. After
 * the cluster starts, every cluster-specific setting is copied back into
 * {@code conf} so callers see the effective configuration.
 *
 * @param conf          base configuration; mutated with tez-site.xml (if any)
 *                      and with the started cluster's specific settings
 * @param miniZkCluster ZooKeeper mini cluster backing the LLAP cluster
 * @param confDir       optional directory containing tez-site.xml; may be null/empty
 * @return the started mini LLAP cluster
 * @throws IOException if the tez-site.xml URL cannot be formed
 */
public static MiniLlapCluster startAndGetMiniLlapCluster(Configuration conf, MiniZooKeeperCluster miniZkCluster, String confDir) throws IOException {
    LOG.info("Using conf dir: {}", confDir);
    if (confDir != null && !confDir.isEmpty()) {
        conf.addResource(new URL("file://" + new File(confDir).toURI().getPath() + "/tez-site.xml"));
    }
    Configuration daemonConf = new LlapDaemonConfiguration(conf);
    final String clusterName = "llap";
    final long maxMemory = LlapDaemon.getTotalHeapSize();
    // 15% for io cache
    final long cacheMemory = (long) (0.15f * maxMemory);
    // 75% for 4 executors
    final long executorMemory = (long) (0.75f * maxMemory);
    final int numExecutors = HiveConf.getIntVar(conf, HiveConf.ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
    final boolean asyncIOEnabled = true;
    // enabling this will cause test failures in Mac OS X
    final boolean directMemoryEnabled = false;
    final int numLocalDirs = 1;
    LOG.info("MiniLlap Configs -  maxMemory: " + maxMemory + " memoryForCache: " + cacheMemory + " totalExecutorMemory: " + executorMemory + " numExecutors: " + numExecutors + " asyncIOEnabled: " + asyncIOEnabled + " directMemoryEnabled: " + directMemoryEnabled + " numLocalDirs: " + numLocalDirs);
    MiniLlapCluster llapCluster = MiniLlapCluster.create(clusterName, miniZkCluster, 1, numExecutors, executorMemory, asyncIOEnabled, directMemoryEnabled, cacheMemory, numLocalDirs);
    llapCluster.init(daemonConf);
    llapCluster.start();
    // Augment conf with the settings from the started llap configuration.
    // Configuration is Iterable<Map.Entry<String, String>>, so a for-each
    // walks exactly the same entries the explicit iterator did.
    Configuration llapConf = llapCluster.getClusterSpecificConfiguration();
    for (Map.Entry<String, String> entry : llapConf) {
        conf.set(entry.getKey(), entry.getValue());
    }
    return llapCluster;
}
Also used : LlapDaemonConfiguration(org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration) Configuration(org.apache.hadoop.conf.Configuration) URL(java.net.URL) LlapDaemonConfiguration(org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration) MiniLlapCluster(org.apache.hadoop.hive.llap.daemon.MiniLlapCluster) File(java.io.File) Map(java.util.Map)

Example 2 with LlapDaemonConfiguration

Use of org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration in the Apache Hive project.

From the class LlapDaemon, method main:

/**
 * Entry point for the LLAP daemon process.
 * <p>
 * Reads configuration from llap-daemon-site.xml (via {@link LlapDaemonConfiguration})
 * and the YARN container environment, constructs and starts the daemon, and
 * installs a shutdown hook. On any startup failure the daemon is shut down and
 * the process exits with status -1.
 *
 * @param args unused command-line arguments
 * @throws Exception never directly; all startup failures are caught and
 *                   converted into {@code System.exit(-1)}
 */
public static void main(String[] args) throws Exception {
    Thread.setDefaultUncaughtExceptionHandler(new LlapDaemonUncaughtExceptionHandler());
    LlapDaemon llapDaemon = null;
    try {
        // Cache settings will need to be setup in llap-daemon-site.xml - since the daemons don't read hive-site.xml
        // Ideally, these properties should be part of LlapDameonConf rather than HiveConf
        LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration();
        // Derive the application name from the YARN container id, when present.
        String containerIdStr = System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name());
        String appName = null;
        if (containerIdStr != null && !containerIdStr.isEmpty()) {
            daemonConf.set(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname, containerIdStr);
            appName = ConverterUtils.toContainerId(containerIdStr).getApplicationAttemptId().getApplicationId().toString();
        } else {
            daemonConf.unset(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
            // Note, we assume production LLAP always runs under YARN.
            LOG.error("Cannot find " + ApplicationConstants.Environment.CONTAINER_ID.toString() + "; LLAP tokens may grant access to subsequent instances of the cluster with" + " the same name");
            appName = null;
        }
        // Record the NodeManager address if both host and port were exported.
        String nmHost = System.getenv(ApplicationConstants.Environment.NM_HOST.name());
        String nmPort = System.getenv(ApplicationConstants.Environment.NM_PORT.name());
        if (!org.apache.commons.lang3.StringUtils.isBlank(nmHost) && !org.apache.commons.lang3.StringUtils.isBlank(nmPort)) {
            String nmAddress = nmHost + ":" + nmPort;
            daemonConf.set(ConfVars.LLAP_DAEMON_NM_ADDRESS.varname, nmAddress);
        } else {
            daemonConf.unset(ConfVars.LLAP_DAEMON_NM_ADDRESS.varname);
            // Unlikely, but log the actual values in case one of the two was empty/null
            LOG.warn("NodeManager host/port not found in environment. Values retrieved: host={}, port={}", nmHost, nmPort);
        }
        int numExecutors = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
        // Local dirs come from the YARN-provided LOCAL_DIRS, possibly overridden by config.
        String workDirsString = System.getenv(ApplicationConstants.Environment.LOCAL_DIRS.name());
        String localDirList = LlapUtil.getDaemonLocalDirString(daemonConf, workDirsString);
        String[] localDirs = (localDirList == null || localDirList.isEmpty()) ? new String[0] : StringUtils.getTrimmedStrings(localDirList);
        int rpcPort = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_RPC_PORT);
        int mngPort = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_MANAGEMENT_RPC_PORT);
        int shufflePort = daemonConf.getInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, ShuffleHandler.DEFAULT_SHUFFLE_PORT);
        int webPort = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_WEB_PORT);
        // Config value is in MB; widen to bytes. Uppercase 'L' suffix — a
        // lowercase 'l' is easily misread as the digit '1'.
        long executorMemoryBytes = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB) * 1024L * 1024L;
        long ioMemoryBytes = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
        boolean isDirectCache = HiveConf.getBoolVar(daemonConf, ConfVars.LLAP_ALLOCATOR_DIRECT);
        boolean isLlapIo = HiveConf.getBoolVar(daemonConf, HiveConf.ConfVars.LLAP_IO_ENABLED, true);
        LlapDaemon.initializeLogging(daemonConf);
        llapDaemon = new LlapDaemon(daemonConf, numExecutors, executorMemoryBytes, isLlapIo, isDirectCache, ioMemoryBytes, localDirs, rpcPort, mngPort, shufflePort, webPort, appName);
        LOG.info("Adding shutdown hook for LlapDaemon");
        ShutdownHookManager.addShutdownHook(new CompositeServiceShutdownHook(llapDaemon), 1);
        llapDaemon.init(daemonConf);
        llapDaemon.start();
        LOG.info("Started LlapDaemon");
    // Relying on the RPC threads to keep the service alive.
    } catch (Throwable t) {
        // TODO Replace this with a ExceptionHandler / ShutdownHook
        LOG.warn("Failed to start LLAP Daemon with exception", t);
        if (llapDaemon != null) {
            llapDaemon.shutdown();
        }
        System.exit(-1);
    }
}
Also used : LlapDaemonConfiguration(org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration)

Example 3 with LlapDaemonConfiguration

Use of org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration in the Apache Hive project.

From the class TestLlapDaemonProtocolServerImpl, method testSimpleCall:

/**
 * Starts an in-process {@link LlapProtocolServerImpl} backed by a mocked
 * {@link ContainerRunner}, submits a work request through a real RPC client,
 * and verifies the server relays the mocked ACCEPTED submission state.
 */
@Test(timeout = 10000)
public void testSimpleCall() throws ServiceException, IOException {
    LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration();
    int numHandlers = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS);
    ContainerRunner containerRunnerMock = mock(ContainerRunner.class);
    // Ports 0/0 let the server bind ephemeral ports; the bound address is read back below.
    LlapProtocolServerImpl server = new LlapProtocolServerImpl(null, numHandlers, containerRunnerMock, new AtomicReference<InetSocketAddress>(), new AtomicReference<InetSocketAddress>(), 0, 0, null);
    when(containerRunnerMock.submitWork(any(SubmitWorkRequestProto.class))).thenReturn(SubmitWorkResponseProto.newBuilder().setSubmissionState(SubmissionStateProto.ACCEPTED).build());
    try {
        server.init(new Configuration());
        server.start();
        InetSocketAddress serverAddr = server.getBindAddress();
        LlapProtocolBlockingPB client = new LlapProtocolClientImpl(new Configuration(), serverAddr.getHostName(), serverAddr.getPort(), null, null, null);
        SubmitWorkResponseProto responseProto = client.submitWork(null, SubmitWorkRequestProto.newBuilder().setAmHost("amhost").setAmPort(2000).build());
        // JUnit convention: expected value first, actual second — otherwise
        // failure messages report the values the wrong way around.
        assertEquals(SubmissionStateProto.ACCEPTED.name(), responseProto.getSubmissionState().name());
    } finally {
        server.stop();
    }
}
Also used : LlapDaemonConfiguration(org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration) ContainerRunner(org.apache.hadoop.hive.llap.daemon.ContainerRunner) LlapDaemonConfiguration(org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) SubmitWorkRequestProto(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto) SubmitWorkResponseProto(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) LlapProtocolClientImpl(org.apache.hadoop.hive.llap.impl.LlapProtocolClientImpl) LlapProtocolBlockingPB(org.apache.hadoop.hive.llap.protocol.LlapProtocolBlockingPB) Test(org.junit.Test)

Aggregations

LlapDaemonConfiguration (org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration)3 Configuration (org.apache.hadoop.conf.Configuration)2 File (java.io.File)1 InetSocketAddress (java.net.InetSocketAddress)1 URL (java.net.URL)1 Map (java.util.Map)1 ContainerRunner (org.apache.hadoop.hive.llap.daemon.ContainerRunner)1 MiniLlapCluster (org.apache.hadoop.hive.llap.daemon.MiniLlapCluster)1 SubmitWorkRequestProto (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)1 SubmitWorkResponseProto (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto)1 LlapProtocolClientImpl (org.apache.hadoop.hive.llap.impl.LlapProtocolClientImpl)1 LlapProtocolBlockingPB (org.apache.hadoop.hive.llap.protocol.LlapProtocolBlockingPB)1 Test (org.junit.Test)1