
Example 1 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache, in the class TestContainerManagerSecurity, method testContainerTokenWithEpoch.

/**
   * This tests whether a containerId is serialized/deserialized with epoch.
   *
   * @throws IOException
   * @throws InterruptedException
   * @throws YarnException
   */
private void testContainerTokenWithEpoch(Configuration conf) throws IOException, InterruptedException, YarnException {
    LOG.info("Running test for serializing/deserializing containerIds");
    NMTokenSecretManagerInRM nmTokenSecretManagerInRM = yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    // Container 3 in epoch 5: the epoch is encoded in the bits above the 40-bit container id.
    ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
    NodeManager nm = yarnCluster.getNodeManager(0);
    NMTokenSecretManagerInNM nmTokenSecretManagerInNM = nm.getNMContext().getNMTokenSecretManager();
    String user = "test";
    waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
    NodeId nodeId = nm.getNMContext().getNodeId();
    // The NM and RM should agree on the current master key id.
    Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(), nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
    // Creating a normal Container Token
    RMContainerTokenSecretManager containerTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
    Resource r = Resource.newInstance(1230, 2);
    Token containerToken = containerTokenSecretManager.createContainerToken(cId, 0, nodeId, user, r, Priority.newInstance(0), 0);
    ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier();
    byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
    // Round-trip the identifier: wrap the raw bytes in a DataInputBuffer and readFields() from it.
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
    containerTokenIdentifier.readFields(dib);
    Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
    Assert.assertEquals(cId.toString(), containerTokenIdentifier.getContainerID().toString());
    Token nmToken = nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
    YarnRPC rpc = YarnRPC.create(conf);
    testStartContainer(rpc, appAttemptId, nodeId, containerToken, nmToken, false);
    List<ContainerId> containerIds = new LinkedList<ContainerId>();
    containerIds.add(cId);
    ContainerManagementProtocol proxy = getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
    GetContainerStatusesResponse res = proxy.getContainerStatuses(GetContainerStatusesRequest.newInstance(containerIds));
    Assert.assertNotNull(res.getContainerStatuses().get(0));
    Assert.assertEquals(cId, res.getContainerStatuses().get(0).getContainerId());
    Assert.assertEquals(cId.toString(), res.getContainerStatuses().get(0).getContainerId().toString());
}
Also used : Resource(org.apache.hadoop.yarn.api.records.Resource) NMTokenSecretManagerInNM(org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Token(org.apache.hadoop.yarn.api.records.Token) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) YarnRPC(org.apache.hadoop.yarn.ipc.YarnRPC) NMTokenSecretManagerInRM(org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM) LinkedList(java.util.LinkedList) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) GetContainerStatusesResponse(org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse) NodeManager(org.apache.hadoop.yarn.server.nodemanager.NodeManager) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) ContainerManagementProtocol(org.apache.hadoop.yarn.api.ContainerManagementProtocol) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) RMContainerTokenSecretManager(org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId)
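
The core move in this test, pointing a DataInputBuffer at a raw byte array and calling readFields() on it, works for any Writable. A minimal sketch (the WritableBytes helper below is our own illustration, not part of Hadoop):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Writable;

/** Hypothetical helper: rehydrate any Writable from a raw byte array. */
public final class WritableBytes {

    private WritableBytes() {
    }

    public static <T extends Writable> T fromBytes(T instance, byte[] bytes) throws IOException {
        DataInputBuffer in = new DataInputBuffer();
        // reset() wraps the array in place; nothing is copied.
        in.reset(bytes, bytes.length);
        instance.readFields(in);
        return instance;
    }
}

With such a helper, the deserialization above collapses to WritableBytes.fromBytes(new ContainerTokenIdentifier(), tokenIdentifierContent).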

Example 2 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache, in the class TestMRSequenceFileAsBinaryInputFormat, method testBinary.

@Test
public void testBinary() throws IOException, InterruptedException {
    Job job = Job.getInstance();
    FileSystem fs = FileSystem.getLocal(job.getConfiguration());
    Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
    Path file = new Path(dir, "testbinary.seq");
    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);
    fs.delete(dir, true);
    FileInputFormat.setInputPaths(job, dir);
    Text tkey = new Text();
    Text tval = new Text();
    SequenceFile.Writer writer = new SequenceFile.Writer(fs, job.getConfiguration(), file, Text.class, Text.class);
    try {
        for (int i = 0; i < RECORDS; ++i) {
            tkey.set(Integer.toString(r.nextInt(), 36));
            tval.set(Long.toString(r.nextLong(), 36));
            writer.append(tkey, tval);
        }
    } finally {
        writer.close();
    }
    TaskAttemptContext context = MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
    InputFormat<BytesWritable, BytesWritable> bformat = new SequenceFileAsBinaryInputFormat();
    int count = 0;
    r.setSeed(seed);
    BytesWritable bkey = new BytesWritable();
    BytesWritable bval = new BytesWritable();
    Text cmpkey = new Text();
    Text cmpval = new Text();
    DataInputBuffer buf = new DataInputBuffer();
    FileInputFormat.setInputPaths(job, file);
    for (InputSplit split : bformat.getSplits(job)) {
        RecordReader<BytesWritable, BytesWritable> reader = bformat.createRecordReader(split, context);
        MapContext<BytesWritable, BytesWritable, BytesWritable, BytesWritable> mcontext = new MapContextImpl<BytesWritable, BytesWritable, BytesWritable, BytesWritable>(job.getConfiguration(), context.getTaskAttemptID(), reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
        reader.initialize(split, mcontext);
        try {
            while (reader.nextKeyValue()) {
                bkey = reader.getCurrentKey();
                bval = reader.getCurrentValue();
                tkey.set(Integer.toString(r.nextInt(), 36));
                tval.set(Long.toString(r.nextLong(), 36));
                buf.reset(bkey.getBytes(), bkey.getLength());
                cmpkey.readFields(buf);
                buf.reset(bval.getBytes(), bval.getLength());
                cmpval.readFields(buf);
                assertTrue("Keys don't match: " + "*" + cmpkey.toString() + ":" + tkey.toString() + "*", cmpkey.toString().equals(tkey.toString()));
                assertTrue("Vals don't match: " + "*" + cmpval.toString() + ":" + tval.toString() + "*", cmpval.toString().equals(tval.toString()));
                ++count;
            }
        } finally {
            reader.close();
        }
    }
    assertEquals("Some records not found", RECORDS, count);
}
Also used : Path(org.apache.hadoop.fs.Path) MapContextImpl(org.apache.hadoop.mapreduce.task.MapContextImpl) Text(org.apache.hadoop.io.Text) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) BytesWritable(org.apache.hadoop.io.BytesWritable) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) Random(java.util.Random) SequenceFile(org.apache.hadoop.io.SequenceFile) FileSystem(org.apache.hadoop.fs.FileSystem) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapreduce.InputSplit) Test(org.junit.Test)
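
SequenceFileAsBinaryInputFormat hands keys and values back as opaque BytesWritable blobs, and the test decodes each one by resetting a DataInputBuffer over the payload. The decode step in isolation might look like this (decodeText is a name introduced here for illustration):

import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;

static Text decodeText(BytesWritable raw) throws IOException {
    DataInputBuffer buf = new DataInputBuffer();
    // Use getLength(), not getBytes().length: the backing array is often
    // larger than the valid payload.
    buf.reset(raw.getBytes(), raw.getLength());
    Text text = new Text();
    text.readFields(buf);
    return text;
}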

Example 3 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache, in the class TestBufferPushPull, method testPull.

@Test
public void testPull() throws Exception {
    final byte[] buff = new byte[BUFFER_LENGTH];
    final InputBuffer input = new InputBuffer(buff);
    final OutputBuffer out = new OutputBuffer(buff);
    final Class<BytesWritable> iKClass = BytesWritable.class;
    final Class<BytesWritable> iVClass = BytesWritable.class;
    final NativeHandlerForPull handler = new NativeHandlerForPull(input, out);
    final KeyValueIterator iter = new KeyValueIterator();
    final BufferPullee pullee = new BufferPullee(iKClass, iVClass, iter, handler);
    handler.setDataLoader(pullee);
    final BufferPuller puller = new BufferPuller(handler);
    handler.setDataReceiver(puller);
    int count = 0;
    while (puller.next()) {
        final DataInputBuffer key = puller.getKey();
        final DataInputBuffer value = puller.getValue();
        final BytesWritable keyBytes = new BytesWritable();
        final BytesWritable valueBytes = new BytesWritable();
        keyBytes.readFields(key);
        valueBytes.readFields(value);
        Assert.assertEquals(dataInput[count].key.toString(), keyBytes.toString());
        Assert.assertEquals(dataInput[count].value.toString(), valueBytes.toString());
        count++;
    }
    puller.close();
    pullee.close();
}
Also used : DataInputBuffer (org.apache.hadoop.io.DataInputBuffer) RawKeyValueIterator (org.apache.hadoop.mapred.RawKeyValueIterator) BufferPullee (org.apache.hadoop.mapred.nativetask.handlers.BufferPullee) BytesWritable (org.apache.hadoop.io.BytesWritable) BufferPuller (org.apache.hadoop.mapred.nativetask.handlers.BufferPuller) Test (org.junit.Test)
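
One detail worth noting: readFields() copies bytes out of the DataInputBuffer that the puller returns, so the BytesWritable instances could be allocated once and reused across iterations instead of once per record. A hedged variant of the loop above:

BytesWritable keyBytes = new BytesWritable();
BytesWritable valueBytes = new BytesWritable();
while (puller.next()) {
    // readFields() copies into the Writable's own buffer, so reusing
    // the same instances on every iteration is safe.
    keyBytes.readFields(puller.getKey());
    valueBytes.readFields(puller.getValue());
    // ... compare or process keyBytes / valueBytes here ...
}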

Example 4 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache, in the class TestMerger, method getKeyAnswer.

private Answer<?> getKeyAnswer(final String segmentName, final boolean isCompressedInput) {
    return new Answer<Object>() {

        int i = 0;

        @SuppressWarnings("unchecked")
        public Boolean answer(InvocationOnMock invocation) {
            // Serve exactly three synthetic keys, then signal end of segment.
            if (i++ == 3) {
                return false;
            }
            Reader<Text, Text> mock = (Reader<Text, Text>) invocation.getMock();
            int multiplier = isCompressedInput ? 100 : 1;
            mock.bytesRead += 10 * multiplier;
            Object[] args = invocation.getArguments();
            DataInputBuffer key = (DataInputBuffer) args[0];
            // Fill the caller's buffer; the valid length is pinned at 20 bytes.
            key.reset(("Segment Key " + segmentName + i).getBytes(), 20);
            return true;
        }
    };
}
Also used : Answer(org.mockito.stubbing.Answer) Mockito.doAnswer(org.mockito.Mockito.doAnswer) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Reader(org.apache.hadoop.mapred.IFile.Reader) Text(org.apache.hadoop.io.Text)
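
For context, an Answer like this is attached to a mocked IFile.Reader with Mockito's doAnswer (which the imports above confirm the test uses), so that each call to nextRawKey() fills the caller's DataInputBuffer with the next synthetic key. Roughly, and with variable names that are our assumption:

// Assumes static imports from org.mockito.Mockito / ArgumentMatchers,
// plus @SuppressWarnings("unchecked") for the raw mock.
Reader<Text, Text> mockReader = mock(Reader.class);
doAnswer(getKeyAnswer("s1", false))
    .when(mockReader)
    .nextRawKey(any(DataInputBuffer.class));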

Example 5 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache, in the class TestIFileStreams, method testIFileStream.

@Test
public void testIFileStream() throws Exception {
    final int DLEN = 100;
    DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
    IFileOutputStream ifos = new IFileOutputStream(dob);
    for (int i = 0; i < DLEN; ++i) {
        ifos.write(i);
    }
    ifos.close();
    DataInputBuffer dib = new DataInputBuffer();
    // The buffer holds DLEN payload bytes plus the 4-byte checksum trailer appended on close().
    dib.reset(dob.getData(), DLEN + 4);
    IFileInputStream ifis = new IFileInputStream(dib, DLEN + 4, new Configuration());
    for (int i = 0; i < DLEN; ++i) {
        assertEquals(i, ifis.read());
    }
    ifis.close();
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) Configuration(org.apache.hadoop.conf.Configuration) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) Test(org.junit.Test)
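
The "+ 4" accounts for the CRC32 checksum that IFileOutputStream appends when it is closed, and IFileInputStream verifies that trailer as the stream is consumed. A sketch of the failure path, modeled on the companion testBadIFileStream test (the exact read at which the exception surfaces is an implementation detail):

// Additional imports assumed: org.apache.hadoop.fs.ChecksumException,
// static org.junit.Assert.fail.
byte[] data = dob.getData();
// Corrupt one payload byte so the stored checksum no longer matches.
data[17] ^= 0xFF;
DataInputBuffer bad = new DataInputBuffer();
bad.reset(data, DLEN + 4);
IFileInputStream badIn = new IFileInputStream(bad, DLEN + 4, new Configuration());
try {
    for (int i = 0; i < DLEN; ++i) {
        badIn.read();
    }
    fail("corrupted stream should have failed the checksum");
} catch (ChecksumException e) {
    // expected
}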

Aggregations

DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 113
Test (org.junit.Test): 50
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 46
IOException (java.io.IOException): 24
Text (org.apache.hadoop.io.Text): 20
Path (org.apache.hadoop.fs.Path): 16
Configuration (org.apache.hadoop.conf.Configuration): 12
IntWritable (org.apache.hadoop.io.IntWritable): 11
Random (java.util.Random): 10
DataInputStream (java.io.DataInputStream): 9
BufferedInputStream (java.io.BufferedInputStream): 8
HashMap (java.util.HashMap): 7
DataOutputStream (java.io.DataOutputStream): 6
BytesWritable (org.apache.hadoop.io.BytesWritable): 6
LongWritable (org.apache.hadoop.io.LongWritable): 6
SerializationFactory (org.apache.hadoop.io.serializer.SerializationFactory): 6
IFile (org.apache.tez.runtime.library.common.sort.impl.IFile): 6
BufferedOutputStream (java.io.BufferedOutputStream): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
InMemoryWriter (org.apache.tez.runtime.library.common.shuffle.orderedgrouped.InMemoryWriter): 4