
Example 26 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

The class TestWritableJobConf, method serDeser.

private <K> K serDeser(K conf) throws Exception {
    SerializationFactory factory = new SerializationFactory(CONF);
    Serializer<K> serializer = factory.getSerializer(GenericsUtil.getClass(conf));
    Deserializer<K> deserializer = factory.getDeserializer(GenericsUtil.getClass(conf));
    DataOutputBuffer out = new DataOutputBuffer();
    serializer.open(out);
    serializer.serialize(conf);
    serializer.close();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    deserializer.open(in);
    K after = deserializer.deserialize(null);
    deserializer.close();
    return after;
}
Also used: DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), SerializationFactory (org.apache.hadoop.io.serializer.SerializationFactory)
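
The round trip above works for any type with a registered Hadoop serialization. For plain Writable types the same DataOutputBuffer/DataInputBuffer rewind can be done directly, without a SerializationFactory. A minimal sketch, assuming hadoop-common on the classpath (class and variable names are illustrative):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;

public class WritableRoundTrip {
    public static void main(String[] args) throws Exception {
        IntWritable before = new IntWritable(42);

        // Serialize into a growable in-memory buffer.
        DataOutputBuffer out = new DataOutputBuffer();
        before.write(out);

        // Rewind: getData() exposes the backing array, valid only up to getLength().
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        IntWritable after = new IntWritable();
        after.readFields(in);
        System.out.println(after.get()); // prints 42
    }
}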

Example 27 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

The class TestSequenceFileAsBinaryOutputFormat, method testBinary.

@Test
public void testBinary() throws IOException {
    JobConf job = new JobConf();
    FileSystem fs = FileSystem.getLocal(job);
    Path dir = new Path(new Path(new Path(System.getProperty("test.build.data", ".")), FileOutputCommitter.TEMP_DIR_NAME), "_" + attempt);
    Path file = new Path(dir, "testbinary.seq");
    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);
    fs.delete(dir, true);
    if (!fs.mkdirs(dir)) {
        fail("Failed to create output directory");
    }
    job.set(JobContext.TASK_ATTEMPT_ID, attempt);
    FileOutputFormat.setOutputPath(job, dir.getParent().getParent());
    FileOutputFormat.setWorkOutputPath(job, dir);
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class);
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class);
    SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    BytesWritable bkey = new BytesWritable();
    BytesWritable bval = new BytesWritable();
    RecordWriter<BytesWritable, BytesWritable> writer = new SequenceFileAsBinaryOutputFormat().getRecordWriter(fs, job, file.toString(), Reporter.NULL);
    IntWritable iwritable = new IntWritable();
    DoubleWritable dwritable = new DoubleWritable();
    DataOutputBuffer outbuf = new DataOutputBuffer();
    LOG.info("Creating data by SequenceFileAsBinaryOutputFormat");
    try {
        for (int i = 0; i < RECORDS; ++i) {
            iwritable = new IntWritable(r.nextInt());
            iwritable.write(outbuf);
            bkey.set(outbuf.getData(), 0, outbuf.getLength());
            outbuf.reset();
            dwritable = new DoubleWritable(r.nextDouble());
            dwritable.write(outbuf);
            bval.set(outbuf.getData(), 0, outbuf.getLength());
            outbuf.reset();
            writer.write(bkey, bval);
        }
    } finally {
        writer.close(Reporter.NULL);
    }
    InputFormat<IntWritable, DoubleWritable> iformat = new SequenceFileInputFormat<IntWritable, DoubleWritable>();
    int count = 0;
    r.setSeed(seed);
    DataInputBuffer buf = new DataInputBuffer();
    final int NUM_SPLITS = 3;
    SequenceFileInputFormat.addInputPath(job, file);
    LOG.info("Reading data by SequenceFileInputFormat");
    for (InputSplit split : iformat.getSplits(job, NUM_SPLITS)) {
        RecordReader<IntWritable, DoubleWritable> reader = iformat.getRecordReader(split, job, Reporter.NULL);
        try {
            int sourceInt;
            double sourceDouble;
            while (reader.next(iwritable, dwritable)) {
                sourceInt = r.nextInt();
                sourceDouble = r.nextDouble();
                assertEquals("Keys don't match: " + "*" + iwritable.get() + ":" + sourceInt + "*", sourceInt, iwritable.get());
                assertTrue("Vals don't match: " + "*" + dwritable.get() + ":" + sourceDouble + "*", Double.compare(dwritable.get(), sourceDouble) == 0);
                ++count;
            }
        } finally {
            reader.close();
        }
    }
    assertEquals("Some records not found", RECORDS, count);
}
Also used: Path (org.apache.hadoop.fs.Path), BytesWritable (org.apache.hadoop.io.BytesWritable), DoubleWritable (org.apache.hadoop.io.DoubleWritable), DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), Random (java.util.Random), FileSystem (org.apache.hadoop.fs.FileSystem), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), IntWritable (org.apache.hadoop.io.IntWritable), Test (org.junit.Test)
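
The key idiom in this test is packing each Writable into a BytesWritable through a shared DataOutputBuffer, with reset() called between records so every record starts at offset 0. A minimal sketch of just that packing step, assuming hadoop-common on the classpath (names are illustrative):

import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

public class PackWritable {
    // Serialize any Writable into a BytesWritable via a reusable buffer.
    static void pack(Writable w, DataOutputBuffer buf, BytesWritable dest)
            throws IOException {
        buf.reset();                                  // rewind before each record
        w.write(buf);                                 // raw serialized bytes
        dest.set(buf.getData(), 0, buf.getLength());  // copy only the valid prefix
    }

    public static void main(String[] args) throws Exception {
        DataOutputBuffer buf = new DataOutputBuffer();
        BytesWritable bkey = new BytesWritable();
        pack(new IntWritable(7), buf, bkey);
        System.out.println(bkey.getLength() + " bytes"); // 4 bytes for an int
    }
}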

Example 28 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

The class StreamXmlRecordReader, method next.

public synchronized boolean next(Text key, Text value) throws IOException {
    numNext++;
    if (pos_ >= end_) {
        return false;
    }
    DataOutputBuffer buf = new DataOutputBuffer();
    if (!readUntilMatchBegin()) {
        return false;
    }
    if (pos_ >= end_ || !readUntilMatchEnd(buf)) {
        return false;
    }
    // There is only one element; key/value splitting is not done here.
    byte[] record = new byte[buf.getLength()];
    System.arraycopy(buf.getData(), 0, record, 0, record.length);
    numRecStats(record, 0, record.length);
    key.set(record);
    value.set("");
    return true;
}
Also used: DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer)
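
Note the explicit copy into a right-sized array: buf.getData() returns the whole backing array, which is usually longer than the written content, so only the first getLength() bytes are meaningful. A minimal sketch of that extraction, assuming hadoop-common on the classpath:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.io.DataOutputBuffer;

public class ExtractBytes {
    public static void main(String[] args) throws Exception {
        DataOutputBuffer buf = new DataOutputBuffer();
        buf.write("<record>hello</record>".getBytes(StandardCharsets.UTF_8));

        // Equivalent to the System.arraycopy above: trim to the valid length.
        byte[] record = Arrays.copyOf(buf.getData(), buf.getLength());
        System.out.println(record.length + " bytes copied");
    }
}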

Example 29 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

The class TestYarnClient, method testAutomaticTimelineDelegationTokenLoading.

@Test
public void testAutomaticTimelineDelegationTokenLoading() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
    TimelineDelegationTokenIdentifier timelineDT = new TimelineDelegationTokenIdentifier();
    final Token<TimelineDelegationTokenIdentifier> dToken = new Token<TimelineDelegationTokenIdentifier>(timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text());
    // create a mock client
    YarnClientImpl client = spy(new YarnClientImpl() {

        @Override
        TimelineClient createTimelineClient() throws IOException, YarnException {
            timelineClient = mock(TimelineClient.class);
            when(timelineClient.getDelegationToken(any(String.class))).thenReturn(dToken);
            return timelineClient;
        }

        @Override
        protected void serviceStart() throws Exception {
            rmClient = mock(ApplicationClientProtocol.class);
        }

        @Override
        protected void serviceStop() throws Exception {
        }

        @Override
        public ApplicationReport getApplicationReport(ApplicationId appId) {
            ApplicationReport report = mock(ApplicationReport.class);
            when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.RUNNING);
            return report;
        }

        @Override
        public boolean isSecurityEnabled() {
            return true;
        }
    });
    client.init(conf);
    client.start();
    try {
        // When i == 1, the timeline DT is not in the credentials, so one more needs to be fetched
        for (int i = 0; i < 2; ++i) {
            ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class);
            ApplicationId applicationId = ApplicationId.newInstance(0, i + 1);
            when(context.getApplicationId()).thenReturn(applicationId);
            DataOutputBuffer dob = new DataOutputBuffer();
            Credentials credentials = new Credentials();
            if (i == 0) {
                credentials.addToken(client.timelineService, dToken);
            }
            credentials.writeTokenStorageToStream(dob);
            ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            ContainerLaunchContext clc = ContainerLaunchContext.newInstance(null, null, null, null, tokens, null);
            when(context.getAMContainerSpec()).thenReturn(clc);
            client.submitApplication(context);
            if (i == 0) {
                // GetTimelineDelegationToken shouldn't be called
                verify(client, never()).getTimelineDelegationToken();
            }
            // Either way, the token should be present
            credentials = new Credentials();
            DataInputByteBuffer dibb = new DataInputByteBuffer();
            tokens = clc.getTokens();
            if (tokens != null) {
                dibb.reset(tokens);
                credentials.readTokenStorageStream(dibb);
                tokens.rewind();
            }
            Collection<Token<? extends TokenIdentifier>> dTokens = credentials.getAllTokens();
            Assert.assertEquals(1, dTokens.size());
            Assert.assertEquals(dToken, dTokens.iterator().next());
        }
    } finally {
        client.stop();
    }
}
Also used: TokenIdentifier (org.apache.hadoop.security.token.TokenIdentifier), TimelineDelegationTokenIdentifier (org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier), CapacitySchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), DataInputByteBuffer (org.apache.hadoop.io.DataInputByteBuffer), Token (org.apache.hadoop.security.token.Token), Text (org.apache.hadoop.io.Text), IOException (java.io.IOException), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ByteBuffer (java.nio.ByteBuffer), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), ApplicationNotFoundException (org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException), ApplicationIdNotProvidedException (org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException), ContainerNotFoundException (org.apache.hadoop.yarn.exceptions.ContainerNotFoundException), ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport), TimelineClient (org.apache.hadoop.yarn.client.api.TimelineClient), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Credentials (org.apache.hadoop.security.Credentials), Test (org.junit.Test)
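
The DataOutputBuffer here implements the standard YARN idiom for shipping delegation tokens: Credentials are written into the buffer, and only the valid prefix is wrapped in the ByteBuffer carried by the ContainerLaunchContext. A minimal sketch of both directions, assuming hadoop-common on the classpath (class and method names are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

public class TokenBuffer {
    // Credentials -> ByteBuffer, as placed in the AM container spec.
    static ByteBuffer toBuffer(Credentials creds) throws IOException {
        DataOutputBuffer dob = new DataOutputBuffer();
        creds.writeTokenStorageToStream(dob);
        // Wrap only the written prefix, not the whole backing array.
        return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    }

    // ByteBuffer -> Credentials, as the test reads them back.
    static Credentials fromBuffer(ByteBuffer tokens) throws IOException {
        DataInputByteBuffer dibb = new DataInputByteBuffer();
        dibb.reset(tokens);
        Credentials creds = new Credentials();
        creds.readTokenStorageStream(dibb);
        tokens.rewind(); // restore the position for any later reader
        return creds;
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer buf = toBuffer(new Credentials());
        System.out.println(fromBuffer(buf).numberOfTokens() + " tokens");
    }
}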

Example 30 with DataOutputBuffer

Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

The class TestResourceLocalizationService, method testDownloadingResourcesOnContainerKill.

@Test(timeout = 20000)
@SuppressWarnings("unchecked")
public void testDownloadingResourcesOnContainerKill() throws Exception {
    List<Path> localDirs = new ArrayList<Path>();
    String[] sDirs = new String[1];
    localDirs.add(lfs.makeQualified(new Path(basedir, 0 + "")));
    sDirs[0] = localDirs.get(0).toString();
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
    dispatcher.register(ApplicationEventType.class, applicationBus);
    EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
    dispatcher.register(ContainerEventType.class, containerBus);
    DummyExecutor exec = new DummyExecutor();
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);
    DeletionService delServiceReal = new DeletionService(exec);
    DeletionService delService = spy(delServiceReal);
    delService.init(new Configuration());
    delService.start();
    ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService, dirsHandler, nmContext);
    ResourceLocalizationService spyService = spy(rawService);
    doReturn(mockServer).when(spyService).createServer();
    doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
    FsPermission defaultPermission = FsPermission.getDirDefault().applyUMask(lfs.getUMask());
    FsPermission nmPermission = ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask());
    final Path userDir = new Path(sDirs[0].substring("file:".length()), ContainerLocalizer.USERCACHE);
    final Path fileDir = new Path(sDirs[0].substring("file:".length()), ContainerLocalizer.FILECACHE);
    final Path sysDir = new Path(sDirs[0].substring("file:".length()), ResourceLocalizationService.NM_PRIVATE_DIR);
    final FileStatus fs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, defaultPermission, "", "", new Path(sDirs[0]));
    final FileStatus nmFs = new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0, nmPermission, "", "", sysDir);
    doAnswer(new Answer<FileStatus>() {

        @Override
        public FileStatus answer(InvocationOnMock invocation) throws Throwable {
            Object[] args = invocation.getArguments();
            if (args.length > 0) {
                if (args[0].equals(userDir) || args[0].equals(fileDir)) {
                    return fs;
                }
            }
            return nmFs;
        }
    }).when(spylfs).getFileStatus(isA(Path.class));
    try {
        spyService.init(conf);
        spyService.start();
        final Application app = mock(Application.class);
        final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3);
        String user = "user0";
        when(app.getUser()).thenReturn(user);
        when(app.getAppId()).thenReturn(appId);
        spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
        ArgumentMatcher<ApplicationEvent> matchesAppInit = new ArgumentMatcher<ApplicationEvent>() {

            @Override
            public boolean matches(Object o) {
                ApplicationEvent evt = (ApplicationEvent) o;
                return evt.getType() == ApplicationEventType.APPLICATION_INITED && appId == evt.getApplicationID();
            }
        };
        dispatcher.await();
        verify(applicationBus).handle(argThat(matchesAppInit));
        // Initialize localizer.
        Random r = new Random();
        long seed = r.nextLong();
        System.out.println("SEED: " + seed);
        r.setSeed(seed);
        final Container c1 = getMockContainer(appId, 42, "user0");
        final Container c2 = getMockContainer(appId, 43, "user0");
        FSDataOutputStream out = new FSDataOutputStream(new DataOutputBuffer(), null);
        doReturn(out).when(spylfs).createInternal(isA(Path.class), isA(EnumSet.class), isA(FsPermission.class), anyInt(), anyShort(), anyLong(), isA(Progressable.class), isA(ChecksumOpt.class), anyBoolean());
        final LocalResource resource1 = getPrivateMockedResource(r);
        LocalResource resource2 = null;
        do {
            resource2 = getPrivateMockedResource(r);
        } while (resource2 == null || resource2.equals(resource1));
        LocalResource resource3 = null;
        do {
            resource3 = getPrivateMockedResource(r);
        } while (resource3 == null || resource3.equals(resource1) || resource3.equals(resource2));
        // Send localization requests for container c1 and c2.
        final LocalResourceRequest req1 = new LocalResourceRequest(resource1);
        final LocalResourceRequest req2 = new LocalResourceRequest(resource2);
        final LocalResourceRequest req3 = new LocalResourceRequest(resource3);
        Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
        List<LocalResourceRequest> privateResourceList = new ArrayList<LocalResourceRequest>();
        privateResourceList.add(req1);
        privateResourceList.add(req2);
        privateResourceList.add(req3);
        rsrcs.put(LocalResourceVisibility.PRIVATE, privateResourceList);
        spyService.handle(new ContainerLocalizationRequestEvent(c1, rsrcs));
        final LocalResourceRequest req1_1 = new LocalResourceRequest(resource2);
        Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs1 = new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
        List<LocalResourceRequest> privateResourceList1 = new ArrayList<LocalResourceRequest>();
        privateResourceList1.add(req1_1);
        rsrcs1.put(LocalResourceVisibility.PRIVATE, privateResourceList1);
        spyService.handle(new ContainerLocalizationRequestEvent(c2, rsrcs1));
        dispatcher.await();
        // Wait for localizers of both container c1 and c2 to begin.
        exec.waitForLocalizers(2);
        LocalizerRunner locC1 = spyService.getLocalizerRunner(c1.getContainerId().toString());
        final String containerIdStr = c1.getContainerId().toString();
        // Heartbeats from container localizer
        LocalResourceStatus rsrc1success = mock(LocalResourceStatus.class);
        LocalResourceStatus rsrc2pending = mock(LocalResourceStatus.class);
        LocalizerStatus stat = mock(LocalizerStatus.class);
        when(stat.getLocalizerId()).thenReturn(containerIdStr);
        when(rsrc1success.getResource()).thenReturn(resource1);
        when(rsrc2pending.getResource()).thenReturn(resource2);
        when(rsrc1success.getLocalSize()).thenReturn(4344L);
        URL locPath = getPath("/some/path");
        when(rsrc1success.getLocalPath()).thenReturn(locPath);
        when(rsrc1success.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
        when(rsrc2pending.getStatus()).thenReturn(ResourceStatusType.FETCH_PENDING);
        when(stat.getResources()).thenReturn(Collections.<LocalResourceStatus>emptyList()).thenReturn(Collections.singletonList(rsrc1success)).thenReturn(Collections.singletonList(rsrc2pending)).thenReturn(Collections.singletonList(rsrc2pending)).thenReturn(Collections.<LocalResourceStatus>emptyList());
        // First heartbeat which schedules first resource.
        LocalizerHeartbeatResponse response = spyService.heartbeat(stat);
        assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
        // Second heartbeat which reports first resource as success.
        // Second resource is scheduled.
        response = spyService.heartbeat(stat);
        assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
        final String locPath1 = response.getResourceSpecs().get(0).getDestinationDirectory().getFile();
        // Third heartbeat which reports second resource as pending.
        // Third resource is scheduled.
        response = spyService.heartbeat(stat);
        assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
        final String locPath2 = response.getResourceSpecs().get(0).getDestinationDirectory().getFile();
        // Container c1 is killed, which leads to cleanup.
        spyService.handle(new ContainerLocalizationCleanupEvent(c1, rsrcs));
        // This heartbeat will tell the container localizer to die, as the
        // localizer runner has stopped.
        response = spyService.heartbeat(stat);
        assertEquals(LocalizerAction.DIE, response.getLocalizerAction());
        exec.setStopLocalization();
        dispatcher.await();
        // verify container notification
        ArgumentMatcher<ContainerEvent> successContainerLoc = new ArgumentMatcher<ContainerEvent>() {

            @Override
            public boolean matches(Object o) {
                ContainerEvent evt = (ContainerEvent) o;
                return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED && c1.getContainerId() == evt.getContainerID();
            }
        };
        // Only one resource gets localized for container c1.
        verify(containerBus).handle(argThat(successContainerLoc));
        Set<Path> paths = Sets.newHashSet(new Path(locPath1), new Path(locPath1 + "_tmp"), new Path(locPath2), new Path(locPath2 + "_tmp"));
        // Wait for localizer runner thread for container c1 to finish.
        while (locC1.getState() != Thread.State.TERMINATED) {
            Thread.sleep(50);
        }
        // Verify if downloading resources were submitted for deletion.
        verify(delService).delete(eq(user), (Path) eq(null), argThat(new DownloadingPathsMatcher(paths)));
        LocalResourcesTracker tracker = spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE, "user0", appId);
        // Container c1 was killed, but this resource was localized before the
        // kill, hence it is not removed despite its ref count being 0.
        LocalizedResource rsrc1 = tracker.getLocalizedResource(req1);
        assertNotNull(rsrc1);
        assertEquals(ResourceState.LOCALIZED, rsrc1.getState());
        assertEquals(0, rsrc1.getRefCount());
        // Container c1 was killed, but this resource is also referenced by
        // container c2, hence its ref count is 1.
        LocalizedResource rsrc2 = tracker.getLocalizedResource(req2);
        assertNotNull(rsrc2);
        assertEquals(ResourceState.DOWNLOADING, rsrc2.getState());
        assertEquals(1, rsrc2.getRefCount());
        // As container c1 was killed and this resource was not referenced by
        // any other container, it is removed.
        LocalizedResource rsrc3 = tracker.getLocalizedResource(req3);
        assertNull(rsrc3);
    } finally {
        spyService.stop();
        dispatcher.stop();
        delService.stop();
    }
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), ContainerLocalizationRequestEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), Container (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container), Random (java.util.Random), LocalizerRunner (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.LocalizerRunner), ArgumentMatcher (org.mockito.ArgumentMatcher), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), ApplicationLocalizationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), ApplicationEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent), DeletionService (org.apache.hadoop.yarn.server.nodemanager.DeletionService), LocalizerStatus (org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus), LocalDirsHandlerService (org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService), ChecksumOpt (org.apache.hadoop.fs.Options.ChecksumOpt), Progressable (org.apache.hadoop.util.Progressable), Collection (java.util.Collection), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Application (org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application), DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), URL (org.apache.hadoop.yarn.api.records.URL), LocalResourceVisibility (org.apache.hadoop.yarn.api.records.LocalResourceVisibility), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Path (org.apache.hadoop.fs.Path), ContainerEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent), EnumSet (java.util.EnumSet), ContainerLocalizationCleanupEvent (org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent), LocalResource (org.apache.hadoop.yarn.api.records.LocalResource), InvocationOnMock (org.mockito.invocation.InvocationOnMock), LocalizerHeartbeatResponse (org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse), LocalResourceStatus (org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus), Test (org.junit.Test)
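
In this test DataOutputBuffer appears only as an in-memory sink: wrapping it in an FSDataOutputStream gives the spied FileContext a stream to return from createInternal without touching disk. A minimal sketch of that stubbing trick, assuming hadoop-common on the classpath (names are illustrative):

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.DataOutputBuffer;

public class InMemorySink {
    public static void main(String[] args) throws Exception {
        // An FSDataOutputStream that writes to memory; handy as a stub
        // return value for mocked file-creation calls, as above.
        DataOutputBuffer backing = new DataOutputBuffer();
        FSDataOutputStream out = new FSDataOutputStream(backing, null);
        out.writeBytes("scratch data");
        out.flush();
        System.out.println(backing.getLength() + " bytes captured");
    }
}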

Aggregations

DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 196
Test (org.junit.Test): 66
ByteBuffer (java.nio.ByteBuffer): 54
Credentials (org.apache.hadoop.security.Credentials): 49
IOException (java.io.IOException): 47
DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 46
Token (org.apache.hadoop.security.token.Token): 36
Configuration (org.apache.hadoop.conf.Configuration): 35
Path (org.apache.hadoop.fs.Path): 31
HashMap (java.util.HashMap): 28
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 26
Text (org.apache.hadoop.io.Text): 25
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 24
ArrayList (java.util.ArrayList): 22
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 22
DataInputStream (java.io.DataInputStream): 21
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 19
Map (java.util.Map): 16
Random (java.util.Random): 15
FileSystem (org.apache.hadoop.fs.FileSystem): 15