Example 6 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hadoop by apache.

From the class TestContainersMonitor, method testContainerKillOnMemoryOverflow.

@Test
public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
    if (!ProcfsBasedProcessTree.isAvailable()) {
        return;
    }
    containerManager.start();
    File scriptFile = new File(tmpDir, "scriptFile.sh");
    PrintWriter fileWriter = new PrintWriter(scriptFile);
    File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile();
    // umask 0 so that the start file is readable by the test.
    fileWriter.write("\numask 0");
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nsleep 15");
    fileWriter.close();
    ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
    // ////// Construct the Container-id
    ApplicationId appId = ApplicationId.newInstance(0, 0);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);
    URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
    LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
    rsrc_alpha.setResource(resource_alpha);
    rsrc_alpha.setSize(-1);
    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc_alpha.setType(LocalResourceType.FILE);
    rsrc_alpha.setTimestamp(scriptFile.lastModified());
    String destinationFile = "dest_file";
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put(destinationFile, rsrc_alpha);
    containerLaunchContext.setLocalResources(localResources);
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add(scriptFile.getAbsolutePath());
    containerLaunchContext.setCommands(commands);
    Resource r = BuilderUtils.newResource(0, 0);
    ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0);
    Token containerToken = BuilderUtils.newContainerToken(context.getNodeId(), containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier), containerIdentifier);
    StartContainerRequest scRequest = StartContainerRequest.newInstance(containerLaunchContext, containerToken);
    List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
    list.add(scRequest);
    StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
    containerManager.startContainers(allRequests);
    int timeoutSecs = 0;
    while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
    }
    Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
    // Now verify the contents of the file
    BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
    Assert.assertEquals("Hello World!", reader.readLine());
    // Get the pid of the process
    String pid = reader.readLine().trim();
    // No more lines
    Assert.assertEquals(null, reader.readLine());
    reader.close();
    BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE, 60);
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(cId);
    GetContainerStatusesRequest gcsRequest = GetContainerStatusesRequest.newInstance(containerIds);
    ContainerStatus containerStatus = containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
    Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM, containerStatus.getExitStatus());
    String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running beyond virtual memory limits. Current usage: " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; " + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. " + "Killing container.\nDump of the process-tree for " + cId + " :\n";
    Pattern pat = Pattern.compile(expectedMsgPattern);
    Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: " + containerStatus.getDiagnostics(), true, pat.matcher(containerStatus.getDiagnostics()).find());
    // Assert that the process is not alive anymore
    Assert.assertFalse("Process is still alive!", exec.signalContainer(new ContainerSignalContext.Builder().setUser(user).setPid(pid).setSignal(Signal.NULL).build()));
}
Also used : HashMap(java.util.HashMap) GetContainerStatusesRequest(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.yarn.api.records.Token) URL(org.apache.hadoop.yarn.api.records.URL) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileReader(java.io.FileReader) PrintWriter(java.io.PrintWriter) Path(org.apache.hadoop.fs.Path) StartContainersRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) Pattern(java.util.regex.Pattern) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) BufferedReader(java.io.BufferedReader) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
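
The LocalResource setup above recurs throughout these examples. As a minimal sketch of the shared pattern, assuming the same static recordFactory field as the test (the helper name localizeScript is ours, not Hadoop's):

static LocalResource localizeScript(FileSystem localFS, File scriptFile) throws IOException {
    // Qualify the local path so the NodeManager can resolve its scheme and authority.
    Path qualified = localFS.makeQualified(new Path(scriptFile.getAbsolutePath()));
    LocalResource rsrc = recordFactory.newRecordInstance(LocalResource.class);
    rsrc.setResource(URL.fromPath(qualified));
    // The test passes -1 for the size; the real length also works.
    rsrc.setSize(scriptFile.length());
    // The timestamp must match the file's modification time, or localization is rejected.
    rsrc.setTimestamp(scriptFile.lastModified());
    rsrc.setType(LocalResourceType.FILE);
    rsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    return rsrc;
}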

Example 7 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hadoop by apache.

From the class TestDelegationTokenRenewer, method testAppSubmissionWithInvalidDelegationToken.

@Test(timeout = 20000)
public void testAppSubmissionWithInvalidDelegationToken() throws Exception {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    MockRM rm = new MockRM(conf) {

        @Override
        protected void doSecureLogin() throws IOException {
        // Skip the login.
        }
    };
    ByteBuffer tokens = ByteBuffer.wrap("BOGUS".getBytes());
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(new HashMap<String, LocalResource>(), new HashMap<String, String>(), new ArrayList<String>(), new HashMap<String, ByteBuffer>(), tokens, new HashMap<ApplicationAccessType, String>());
    ApplicationSubmissionContext appSubContext = ApplicationSubmissionContext.newInstance(ApplicationId.newInstance(1234121, 0), "BOGUS", "default", Priority.UNDEFINED, amContainer, false, true, 1, Resource.newInstance(1024, 1), "BOGUS");
    SubmitApplicationRequest request = SubmitApplicationRequest.newInstance(appSubContext);
    try {
        rm.getClientRMService().submitApplication(request);
        fail("Error was excepted.");
    } catch (YarnException e) {
        Assert.assertTrue(e.getMessage().contains("Bad header found in token storage"));
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) TestSecurityMockRM(org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.TestSecurityMockRM) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ByteBuffer(java.nio.ByteBuffer) DataInputByteBuffer(org.apache.hadoop.io.DataInputByteBuffer) SubmitApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) Test(org.junit.Test)
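
The "Bad header found in token storage" diagnostic comes from Credentials#readTokenStorageStream, which expects the framing written by the Credentials class rather than raw bytes like "BOGUS". A minimal sketch, ours and not part of the test, of building a tokens buffer that would parse cleanly (DataOutputBuffer is org.apache.hadoop.io.DataOutputBuffer):

Credentials credentials = new Credentials();
// credentials.addToken(...) would go here for a real submission.
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
ByteBuffer validTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());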

Example 8 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hadoop by apache.

From the class TestRMWebServicesAppsModification, method testAppSubmit.

public void testAppSubmit(String acceptMedia, String contentMedia) throws Exception {
    // Create a test app and submit it via REST (after getting an app-id), then
    // get the app details from the RMContext and check that everything matches.
    client().addFilter(new LoggingFilter(System.out));
    String lrKey = "example";
    String queueName = "testqueue";
    // create the queue
    String[] queues = { "default", "testqueue" };
    CapacitySchedulerConfiguration csconf = new CapacitySchedulerConfiguration();
    csconf.setQueues("root", queues);
    csconf.setCapacity("root.default", 50.0f);
    csconf.setCapacity("root.testqueue", 50.0f);
    rm.getResourceScheduler().reinitialize(csconf, rm.getRMContext());
    String appName = "test";
    String appType = "test-type";
    String urlPath = "apps";
    String appId = testGetNewApplication(acceptMedia);
    List<String> commands = new ArrayList<>();
    commands.add("/bin/sleep 5");
    HashMap<String, String> environment = new HashMap<>();
    environment.put("APP_VAR", "ENV_SETTING");
    HashMap<ApplicationAccessType, String> acls = new HashMap<>();
    acls.put(ApplicationAccessType.MODIFY_APP, "testuser1, testuser2");
    acls.put(ApplicationAccessType.VIEW_APP, "testuser3, testuser4");
    Set<String> tags = new HashSet<>();
    tags.add("tag1");
    tags.add("tag 2");
    CredentialsInfo credentials = new CredentialsInfo();
    HashMap<String, String> tokens = new HashMap<>();
    HashMap<String, String> secrets = new HashMap<>();
    secrets.put("secret1", Base64.encodeBase64String("mysecret".getBytes("UTF8")));
    credentials.setSecrets(secrets);
    credentials.setTokens(tokens);
    ApplicationSubmissionContextInfo appInfo = new ApplicationSubmissionContextInfo();
    appInfo.setApplicationId(appId);
    appInfo.setApplicationName(appName);
    appInfo.setMaxAppAttempts(2);
    appInfo.setQueue(queueName);
    appInfo.setApplicationType(appType);
    appInfo.setPriority(0);
    HashMap<String, LocalResourceInfo> lr = new HashMap<>();
    LocalResourceInfo y = new LocalResourceInfo();
    y.setUrl(new URI("http://www.test.com/file.txt"));
    y.setSize(100);
    y.setTimestamp(System.currentTimeMillis());
    y.setType(LocalResourceType.FILE);
    y.setVisibility(LocalResourceVisibility.APPLICATION);
    lr.put(lrKey, y);
    appInfo.getContainerLaunchContextInfo().setResources(lr);
    appInfo.getContainerLaunchContextInfo().setCommands(commands);
    appInfo.getContainerLaunchContextInfo().setEnvironment(environment);
    appInfo.getContainerLaunchContextInfo().setAcls(acls);
    appInfo.getContainerLaunchContextInfo().getAuxillaryServiceData().put("test", Base64.encodeBase64URLSafeString("value12".getBytes("UTF8")));
    appInfo.getContainerLaunchContextInfo().setCredentials(credentials);
    appInfo.getResource().setMemory(1024);
    appInfo.getResource().setvCores(1);
    appInfo.setApplicationTags(tags);
    // Set LogAggregationContextInfo
    String includePattern = "file1";
    String excludePattern = "file2";
    String rolledLogsIncludePattern = "file3";
    String rolledLogsExcludePattern = "file4";
    String className = "policy_class";
    String parameters = "policy_parameter";
    LogAggregationContextInfo logAggregationContextInfo = new LogAggregationContextInfo();
    logAggregationContextInfo.setIncludePattern(includePattern);
    logAggregationContextInfo.setExcludePattern(excludePattern);
    logAggregationContextInfo.setRolledLogsIncludePattern(rolledLogsIncludePattern);
    logAggregationContextInfo.setRolledLogsExcludePattern(rolledLogsExcludePattern);
    logAggregationContextInfo.setLogAggregationPolicyClassName(className);
    logAggregationContextInfo.setLogAggregationPolicyParameters(parameters);
    appInfo.setLogAggregationContextInfo(logAggregationContextInfo);
    // Set attemptFailuresValidityInterval
    long attemptFailuresValidityInterval = 5000;
    appInfo.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    // Set ReservationId
    String reservationId = ReservationId.newInstance(System.currentTimeMillis(), 1).toString();
    appInfo.setReservationId(reservationId);
    ClientResponse response = this.constructWebResource(urlPath).accept(acceptMedia).entity(appInfo, contentMedia).post(ClientResponse.class);
    if (!this.isAuthenticationEnabled()) {
        assertResponseStatusCode(Status.UNAUTHORIZED, response.getStatusInfo());
        return;
    }
    assertResponseStatusCode(Status.ACCEPTED, response.getStatusInfo());
    assertTrue(!response.getHeaders().getFirst(HttpHeaders.LOCATION).isEmpty());
    String locURL = response.getHeaders().getFirst(HttpHeaders.LOCATION);
    assertTrue(locURL.contains("/apps/application"));
    appId = locURL.substring(locURL.indexOf("/apps/") + "/apps/".length());
    WebResource res = resource().uri(new URI(locURL));
    res = res.queryParam("user.name", webserviceUserName);
    response = res.get(ClientResponse.class);
    assertResponseStatusCode(Status.OK, response.getStatusInfo());
    RMApp app = rm.getRMContext().getRMApps().get(ApplicationId.fromString(appId));
    assertEquals(appName, app.getName());
    assertEquals(webserviceUserName, app.getUser());
    assertEquals(2, app.getMaxAppAttempts());
    if (app.getQueue().contains("root.")) {
        queueName = "root." + queueName;
    }
    assertEquals(queueName, app.getQueue());
    assertEquals(appType, app.getApplicationType());
    assertEquals(tags, app.getApplicationTags());
    ContainerLaunchContext ctx = app.getApplicationSubmissionContext().getAMContainerSpec();
    assertEquals(commands, ctx.getCommands());
    assertEquals(environment, ctx.getEnvironment());
    assertEquals(acls, ctx.getApplicationACLs());
    Map<String, LocalResource> appLRs = ctx.getLocalResources();
    assertTrue(appLRs.containsKey(lrKey));
    LocalResource exampleLR = appLRs.get(lrKey);
    assertEquals(URL.fromURI(y.getUrl()), exampleLR.getResource());
    assertEquals(y.getSize(), exampleLR.getSize());
    assertEquals(y.getTimestamp(), exampleLR.getTimestamp());
    assertEquals(y.getType(), exampleLR.getType());
    assertEquals(y.getPattern(), exampleLR.getPattern());
    assertEquals(y.getVisibility(), exampleLR.getVisibility());
    Credentials cs = new Credentials();
    ByteArrayInputStream str = new ByteArrayInputStream(app.getApplicationSubmissionContext().getAMContainerSpec().getTokens().array());
    DataInputStream di = new DataInputStream(str);
    cs.readTokenStorageStream(di);
    Text key = new Text("secret1");
    assertTrue("Secrets missing from credentials object", cs.getAllSecretKeys().contains(key));
    assertEquals("mysecret", new String(cs.getSecretKey(key), "UTF-8"));
    // Check LogAggregationContext
    ApplicationSubmissionContext asc = app.getApplicationSubmissionContext();
    LogAggregationContext lac = asc.getLogAggregationContext();
    assertEquals(includePattern, lac.getIncludePattern());
    assertEquals(excludePattern, lac.getExcludePattern());
    assertEquals(rolledLogsIncludePattern, lac.getRolledLogsIncludePattern());
    assertEquals(rolledLogsExcludePattern, lac.getRolledLogsExcludePattern());
    assertEquals(className, lac.getLogAggregationPolicyClassName());
    assertEquals(parameters, lac.getLogAggregationPolicyParameters());
    // Check attemptFailuresValidityInterval
    assertEquals(attemptFailuresValidityInterval, asc.getAttemptFailuresValidityInterval());
    // Check ReservationId
    assertEquals(reservationId, app.getReservationId().toString());
    response = this.constructWebResource("apps", appId).accept(acceptMedia).get(ClientResponse.class);
    assertResponseStatusCode(Status.OK, response.getStatusInfo());
}
Also used : ClientResponse(com.sun.jersey.api.client.ClientResponse) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) HashMap(java.util.HashMap) CredentialsInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CredentialsInfo) LoggingFilter(com.sun.jersey.api.client.filter.LoggingFilter) ArrayList(java.util.ArrayList) WebResource(com.sun.jersey.api.client.WebResource) ApplicationSubmissionContextInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo) URI(java.net.URI) LogAggregationContextInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LogAggregationContextInfo) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) CapacitySchedulerConfiguration(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration) HashSet(java.util.HashSet) Text(org.apache.hadoop.io.Text) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) DataInputStream(java.io.DataInputStream) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) LocalResourceInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LocalResourceInfo) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ByteArrayInputStream(java.io.ByteArrayInputStream) Credentials(org.apache.hadoop.security.Credentials) LogAggregationContext(org.apache.hadoop.yarn.api.records.LogAggregationContext)
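
Note that testAppSubmit is a helper rather than a test itself; the suite invokes it once per accept/content-type combination. A hypothetical driver, with the method name and media-type choice as our assumptions:

@Test
public void testAppSubmitAsJson() throws Exception {
    // The suite exercises the XML variants the same way.
    testAppSubmit(MediaType.APPLICATION_JSON, MediaType.APPLICATION_JSON);
}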

Example 9 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.

From the class GenericUDTFGetSplits, method getSplits.

public InputSplit[] getSplits(JobConf job, int numSplits, TezWork work, Schema schema) throws IOException {
    DAG dag = DAG.create(work.getName());
    dag.setCredentials(job.getCredentials());
    DagUtils utils = DagUtils.getInstance();
    Context ctx = new Context(job);
    MapWork mapWork = (MapWork) work.getAllWork().get(0);
    // bunch of things get setup in the context based on conf but we need only the MR tmp directory
    // for the following method.
    JobConf wxConf = utils.initializeVertexConf(job, ctx, mapWork);
    // TODO: should we also whitelist input formats here? from mapred.input.format.class
    Path scratchDir = utils.createTezDir(ctx.getMRScratchDir(), job);
    FileSystem fs = scratchDir.getFileSystem(job);
    try {
        LocalResource appJarLr = createJarLocalResource(utils.getExecJarPathLocal(), utils, job);
        Vertex wx = utils.createVertex(wxConf, mapWork, scratchDir, appJarLr, new ArrayList<LocalResource>(), fs, ctx, false, work, work.getVertexType(mapWork));
        String vertexName = wx.getName();
        dag.addVertex(wx);
        utils.addCredentials(mapWork, dag);
        // we have the dag now proceed to get the splits:
        Preconditions.checkState(HiveConf.getBoolVar(wxConf, ConfVars.HIVE_TEZ_GENERATE_CONSISTENT_SPLITS));
        Preconditions.checkState(HiveConf.getBoolVar(wxConf, ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS));
        HiveSplitGenerator splitGenerator = new HiveSplitGenerator(wxConf, mapWork);
        List<Event> eventList = splitGenerator.initialize();
        InputSplit[] result = new InputSplit[eventList.size() - 1];
        InputConfigureVertexTasksEvent configureEvent = (InputConfigureVertexTasksEvent) eventList.get(0);
        List<TaskLocationHint> hints = configureEvent.getLocationHint().getTaskLocationHints();
        Preconditions.checkState(hints.size() == eventList.size() - 1);
        if (LOG.isDebugEnabled()) {
            LOG.debug("NumEvents=" + eventList.size() + ", NumSplits=" + result.length);
        }
        LlapCoordinator coordinator = LlapCoordinator.getInstance();
        if (coordinator == null) {
            throw new IOException("LLAP coordinator is not initialized; must be running in HS2 with " + ConfVars.LLAP_HS2_ENABLE_COORDINATOR.varname + " enabled");
        }
        // See the discussion in the implementation as to why we generate app ID.
        ApplicationId applicationId = coordinator.createExtClientAppId();
        // This assumes LLAP cluster owner is always the HS2 user.
        String llapUser = UserGroupInformation.getLoginUser().getShortUserName();
        String queryUser = null;
        byte[] tokenBytes = null;
        LlapSigner signer = null;
        if (UserGroupInformation.isSecurityEnabled()) {
            signer = coordinator.getLlapSigner(job);
            // 1. Generate the token for query user (applies to all splits).
            queryUser = SessionState.getUserFromAuthenticator();
            if (queryUser == null) {
                queryUser = UserGroupInformation.getCurrentUser().getUserName();
                LOG.warn("Cannot determine the session user; using " + queryUser + " instead");
            }
            LlapTokenLocalClient tokenClient = coordinator.getLocalTokenClient(job, llapUser);
            // We put the query user, not LLAP user, into the message and token.
            Token<LlapTokenIdentifier> token = tokenClient.createToken(applicationId.toString(), queryUser, true);
            LOG.info("Created the token for remote user: {}", token);
            // bos and dos are stream fields of the enclosing class, not shown
            // in this excerpt (see the sketch after this example).
            bos.reset();
            token.write(dos);
            tokenBytes = bos.toByteArray();
        } else {
            queryUser = UserGroupInformation.getCurrentUser().getUserName();
        }
        LOG.info("Number of splits: " + (eventList.size() - 1));
        SignedMessage signedSvs = null;
        for (int i = 0; i < eventList.size() - 1; i++) {
            TaskSpec taskSpec = new TaskSpecBuilder().constructTaskSpec(dag, vertexName, eventList.size() - 1, applicationId, i);
            // 2. Generate the vertex/submit information for all events.
            if (i == 0) {
                // The queryId could either be picked up from the current request being processed, or
                // generated. The current request isn't exactly correct since the query is 'done' once we
                // return the results. Generating a new one has the added benefit of working once this
                // is moved out of a UDTF into a proper API.
                // Setting this to the generated AppId which is unique.
                // Despite the differences in TaskSpec, the vertex spec should be the same.
                signedSvs = createSignedVertexSpec(signer, taskSpec, applicationId, queryUser, applicationId.toString());
            }
            SubmitWorkInfo submitWorkInfo = new SubmitWorkInfo(applicationId, System.currentTimeMillis(), taskSpec.getVertexParallelism(), signedSvs.message, signedSvs.signature);
            byte[] submitWorkBytes = SubmitWorkInfo.toBytes(submitWorkInfo);
            // 3. Generate input event.
            SignedMessage eventBytes = makeEventBytes(wx, vertexName, eventList.get(i + 1), signer);
            // 4. Make location hints.
            SplitLocationInfo[] locations = makeLocationHints(hints.get(i));
            result[i] = new LlapInputSplit(i, submitWorkBytes, eventBytes.message, eventBytes.signature, locations, schema, llapUser, tokenBytes);
        }
        return result;
    } catch (Exception e) {
        throw new IOException(e);
    }
}
Also used : Vertex(org.apache.tez.dag.api.Vertex) SubmitWorkInfo(org.apache.hadoop.hive.llap.SubmitWorkInfo) LlapTokenIdentifier(org.apache.hadoop.hive.llap.security.LlapTokenIdentifier) SplitLocationInfo(org.apache.hadoop.mapred.SplitLocationInfo) HiveSplitGenerator(org.apache.hadoop.hive.ql.exec.tez.HiveSplitGenerator) TaskSpecBuilder(org.apache.tez.dag.api.TaskSpecBuilder) LlapSigner(org.apache.hadoop.hive.llap.security.LlapSigner) TaskLocationHint(org.apache.tez.dag.api.TaskLocationHint) LlapTokenLocalClient(org.apache.hadoop.hive.llap.security.LlapTokenLocalClient) DagUtils(org.apache.hadoop.hive.ql.exec.tez.DagUtils) LlapInputSplit(org.apache.hadoop.hive.llap.LlapInputSplit) FileSystem(org.apache.hadoop.fs.FileSystem) JobConf(org.apache.hadoop.mapred.JobConf) LlapInputSplit(org.apache.hadoop.hive.llap.LlapInputSplit) InputSplit(org.apache.hadoop.mapred.InputSplit) Context(org.apache.hadoop.hive.ql.Context) Path(org.apache.hadoop.fs.Path) TaskSpec(org.apache.tez.runtime.api.impl.TaskSpec) SignedMessage(org.apache.hadoop.hive.llap.security.LlapSigner.SignedMessage) DAG(org.apache.tez.dag.api.DAG) IOException(java.io.IOException) LlapCoordinator(org.apache.hadoop.hive.llap.coordinator.LlapCoordinator) TaskLocationHint(org.apache.tez.dag.api.TaskLocationHint) LoginException(javax.security.auth.login.LoginException) URISyntaxException(java.net.URISyntaxException) UDFArgumentLengthException(org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException) FileNotFoundException(java.io.FileNotFoundException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) UDFArgumentException(org.apache.hadoop.hive.ql.exec.UDFArgumentException) UDFArgumentTypeException(org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException) CommandNeedRetryException(org.apache.hadoop.hive.ql.CommandNeedRetryException) IOException(java.io.IOException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) Event(org.apache.tez.runtime.api.Event) InputConfigureVertexTasksEvent(org.apache.tez.runtime.api.events.InputConfigureVertexTasksEvent) InputDataInformationEvent(org.apache.tez.runtime.api.events.InputDataInformationEvent) InputConfigureVertexTasksEvent(org.apache.tez.runtime.api.events.InputConfigureVertexTasksEvent) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId)
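
Since bos and dos above are fields of the enclosing UDTF, here is a self-contained sketch of the same token-framing step, given a Token<LlapTokenIdentifier> token as in the snippet and relying only on Token implementing Writable:

ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
DataOutputStream dos = new DataOutputStream(bos);
// Token implements Writable, so it frames itself onto any DataOutput.
token.write(dos);
dos.flush();
byte[] tokenBytes = bos.toByteArray();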

Example 10 with LocalResource

Use of org.apache.hadoop.yarn.api.records.LocalResource in project hadoop by apache.

From the class TestFSDownload, method createJarFile.

static LocalResource createJarFile(FileContext files, Path p, int len, Random r, LocalResourceVisibility vis) throws IOException, URISyntaxException {
    byte[] bytes = new byte[len];
    r.nextBytes(bytes);
    File archiveFile = new File(p.toUri().getPath() + ".jar");
    archiveFile.createNewFile();
    JarOutputStream out = new JarOutputStream(new FileOutputStream(archiveFile));
    out.putNextEntry(new JarEntry(p.getName()));
    out.write(bytes);
    out.closeEntry();
    out.close();
    LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
    ret.setResource(URL.fromPath(new Path(p.toString() + ".jar")));
    ret.setSize(len);
    ret.setType(LocalResourceType.ARCHIVE);
    ret.setVisibility(vis);
    ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".jar")).getModificationTime());
    return ret;
}
Also used : Path(org.apache.hadoop.fs.Path) FileOutputStream(java.io.FileOutputStream) JarOutputStream(java.util.jar.JarOutputStream) JarEntry(java.util.jar.JarEntry) File(java.io.File) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource)
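
As a hedged usage sketch, the jar produced by createJarFile can be handed straight to FSDownload, the class this suite exercises; the basedir path is our assumption and is assumed to exist:

FileContext files = FileContext.getLocalFSFileContext();
Path basedir = new Path("target", "TestFSDownload"); // hypothetical work dir
LocalResource jar = createJarFile(files, new Path(basedir, "archive"), 4096,
        new Random(), LocalResourceVisibility.PRIVATE);
// FSDownload implements Callable<Path>; call() localizes and returns the final path.
FSDownload download = new FSDownload(files, UserGroupInformation.getCurrentUser(),
        new Configuration(), new Path(basedir, "dest"), jar);
Path localized = download.call();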

Aggregations

LocalResource (org.apache.hadoop.yarn.api.records.LocalResource)128 Path (org.apache.hadoop.fs.Path)84 HashMap (java.util.HashMap)67 Test (org.junit.Test)48 ArrayList (java.util.ArrayList)42 ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext)40 IOException (java.io.IOException)37 File (java.io.File)30 FileSystem (org.apache.hadoop.fs.FileSystem)29 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)29 Configuration (org.apache.hadoop.conf.Configuration)28 URL (org.apache.hadoop.yarn.api.records.URL)26 FileStatus (org.apache.hadoop.fs.FileStatus)25 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)24 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)22 ByteBuffer (java.nio.ByteBuffer)18 LocalResourceVisibility (org.apache.hadoop.yarn.api.records.LocalResourceVisibility)18 Credentials (org.apache.hadoop.security.Credentials)17 StartContainerRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest)17 DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer)16