Example 61 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the hadoop project by Apache.

From the class TestYARNTokenIdentifier, method testAMRMTokenIdentifier:

@Test
public void testAMRMTokenIdentifier() throws IOException {
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1);
    int masterKeyId = 1;
    AMRMTokenIdentifier token = new AMRMTokenIdentifier(appAttemptId, masterKeyId);
    AMRMTokenIdentifier anotherToken = new AMRMTokenIdentifier();
    byte[] tokenContent = token.getBytes();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenContent, tokenContent.length);
    anotherToken.readFields(dib);
    // verify the deserialized record equals the original record
    Assert.assertEquals("Token is not the same after serialization and deserialization.", token, anotherToken);
    Assert.assertEquals("ApplicationAttemptId from proto does not match the original token", appAttemptId, anotherToken.getApplicationAttemptId());
    Assert.assertEquals("masterKeyId from proto does not match the original token", masterKeyId, anotherToken.getKeyId());
}
Also used: DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Test (org.junit.Test)
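
The test above shows the standard Writable round trip: serialize to a byte array, point a DataInputBuffer at those bytes with reset, then rebuild a fresh instance via readFields. A minimal self-contained sketch of the same pattern, using org.apache.hadoop.io.Text in place of a YARN token (the class and method names in the sketch are illustrative, not from the Hadoop tests):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class WritableRoundTrip {
    public static void main(String[] args) throws IOException {
        Text original = new Text("hello");
        // serialize: DataOutputBuffer is an in-memory DataOutput
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);
        // deserialize: rewind a DataInputBuffer over the written bytes;
        // getData() returns the whole backing array, so bound it by getLength()
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());
        Text copy = new Text();
        copy.readFields(in);
        // the round trip must preserve equality
        if (!original.equals(copy)) {
            throw new AssertionError("round trip changed the value");
        }
    }
}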

Example 62 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the hadoop project by Apache.

From the class TestYARNTokenIdentifier, method testTimelineDelegationTokenIdentifier:

@Test
public void testTimelineDelegationTokenIdentifier() throws IOException {
    Text owner = new Text("user1");
    Text renewer = new Text("user2");
    Text realUser = new Text("user3");
    long issueDate = 1;
    long maxDate = 2;
    int sequenceNumber = 3;
    int masterKeyId = 4;
    TimelineDelegationTokenIdentifier token = new TimelineDelegationTokenIdentifier(owner, renewer, realUser);
    token.setIssueDate(issueDate);
    token.setMaxDate(maxDate);
    token.setSequenceNumber(sequenceNumber);
    token.setMasterKeyId(masterKeyId);
    TimelineDelegationTokenIdentifier anotherToken = new TimelineDelegationTokenIdentifier();
    byte[] tokenContent = token.getBytes();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenContent, tokenContent.length);
    anotherToken.readFields(dib);
    // verify the deserialized record equals the original record
    Assert.assertEquals("Token is not the same after serialization and deserialization.", token, anotherToken);
    Assert.assertEquals("owner from proto does not match the original token", owner, anotherToken.getOwner());
    Assert.assertEquals("renewer from proto does not match the original token", renewer, anotherToken.getRenewer());
    Assert.assertEquals("realUser from proto does not match the original token", realUser, anotherToken.getRealUser());
    Assert.assertEquals("issueDate from proto does not match the original token", issueDate, anotherToken.getIssueDate());
    Assert.assertEquals("maxDate from proto does not match the original token", maxDate, anotherToken.getMaxDate());
    Assert.assertEquals("sequenceNumber from proto does not match the original token", sequenceNumber, anotherToken.getSequenceNumber());
    Assert.assertEquals("masterKeyId from proto does not match the original token", masterKeyId, anotherToken.getMasterKeyId());
}
Also used: DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), Text (org.apache.hadoop.io.Text), TimelineDelegationTokenIdentifier (org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier), Test (org.junit.Test)
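
Both token tests repeat the same serialize/deserialize scaffolding. One way to factor it is a small generic helper; the sketch below is hypothetical (the RoundTrips class and roundTrip method are assumptions for illustration, not part of the Hadoop test code):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Writable;

public final class RoundTrips {
    private RoundTrips() {
    }

    // Hypothetical helper: rewind a DataInputBuffer over already-serialized
    // bytes, populate dst via readFields, and return dst for fluent asserts.
    public static <T extends Writable> T roundTrip(byte[] serialized, T dst) throws IOException {
        DataInputBuffer dib = new DataInputBuffer();
        dib.reset(serialized, serialized.length);
        dst.readFields(dib);
        return dst;
    }
}

With such a helper, the deserialization step in testAMRMTokenIdentifier would reduce to something like RoundTrips.roundTrip(token.getBytes(), new AMRMTokenIdentifier()), followed by the same assertions.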

Example 63 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the hadoop project by Apache.

From the class TestPBRecordImpl, method testLocalResourceStatusSerDe:

@Test(timeout = 10000)
public void testLocalResourceStatusSerDe() throws Exception {
    LocalResourceStatus rsrcS = createLocalResourceStatus();
    assertTrue(rsrcS instanceof LocalResourceStatusPBImpl);
    LocalResourceStatusPBImpl rsrcPb = (LocalResourceStatusPBImpl) rsrcS;
    DataOutputBuffer out = new DataOutputBuffer();
    // writeDelimitedTo prefixes the message with its varint-encoded length
    rsrcPb.getProto().writeDelimitedTo(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), 0, out.getLength());
    // parseDelimitedFrom consumes the length prefix and exactly one message
    LocalResourceStatusProto rsrcPbD = LocalResourceStatusProto.parseDelimitedFrom(in);
    assertNotNull(rsrcPbD);
    LocalResourceStatus rsrcD = new LocalResourceStatusPBImpl(rsrcPbD);
    assertEquals(rsrcS, rsrcD);
    assertEquals(createResource(), rsrcS.getResource());
    assertEquals(createResource(), rsrcD.getResource());
}
Also used: LocalResourceStatusProto (org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalResourceStatusProto), DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), LocalResourceStatus (org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus), Test (org.junit.Test)
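
The delimited calls are the key detail here: writeDelimitedTo prefixes the message with its varint-encoded length, which is how parseDelimitedFrom finds the message boundary, so several messages can share one buffer. The sketch below illustrates the same length-prefixed framing idea using only hadoop-common classes; note that WritableUtils vints are Hadoop's own variable-length encoding, not the protobuf wire format, so this is an analogy rather than protobuf interop (the class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class DelimitedFraming {
    public static void main(String[] args) throws IOException {
        byte[][] records = { "first".getBytes(), "second".getBytes() };
        // write each record with a variable-length size prefix
        DataOutputBuffer out = new DataOutputBuffer();
        for (byte[] r : records) {
            WritableUtils.writeVInt(out, r.length);
            out.write(r);
        }
        // read the records back, using each prefix to find the boundary
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), 0, out.getLength());
        while (in.getPosition() < out.getLength()) {
            int len = WritableUtils.readVInt(in);
            byte[] r = new byte[len];
            in.readFully(r);
            System.out.println(new String(r));
        }
    }
}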

Example 64 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the hadoop project by Apache.

From the class ReduceTask, method runNewReducer:

@SuppressWarnings("unchecked")
private <INKEY, INVALUE, OUTKEY, OUTVALUE> void runNewReducer(JobConf job, final TaskUmbilicalProtocol umbilical, final TaskReporter reporter, RawKeyValueIterator rIter, RawComparator<INKEY> comparator, Class<INKEY> keyClass, Class<INVALUE> valueClass) throws IOException, InterruptedException, ClassNotFoundException {
    // wrap value iterator to report progress.
    final RawKeyValueIterator rawIter = rIter;
    rIter = new RawKeyValueIterator() {

        public void close() throws IOException {
            rawIter.close();
        }

        public DataInputBuffer getKey() throws IOException {
            return rawIter.getKey();
        }

        public Progress getProgress() {
            return rawIter.getProgress();
        }

        public DataInputBuffer getValue() throws IOException {
            return rawIter.getValue();
        }

        public boolean next() throws IOException {
            boolean ret = rawIter.next();
            reporter.setProgress(rawIter.getProgress().getProgress());
            return ret;
        }
    };
    // make a task context so we can get the classes
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job, getTaskID(), reporter);
    // make a reducer
    org.apache.hadoop.mapreduce.Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE> reducer = (org.apache.hadoop.mapreduce.Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE>) ReflectionUtils.newInstance(taskContext.getReducerClass(), job);
    org.apache.hadoop.mapreduce.RecordWriter<OUTKEY, OUTVALUE> trackedRW = new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(this, taskContext);
    job.setBoolean("mapred.skip.on", isSkipping());
    job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
    org.apache.hadoop.mapreduce.Reducer.Context reducerContext = createReduceContext(reducer, job, getTaskID(), rIter, reduceInputKeyCounter, reduceInputValueCounter, trackedRW, committer, reporter, comparator, keyClass, valueClass);
    try {
        reducer.run(reducerContext);
    } finally {
        trackedRW.close(reducerContext);
    }
}
Also used: TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext), Progress (org.apache.hadoop.util.Progress), IOException (java.io.IOException), DataInputBuffer (org.apache.hadoop.io.DataInputBuffer)
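
At this layer keys and values stay as raw serialized bytes: RawKeyValueIterator hands the reduce machinery DataInputBuffers, and the anonymous wrapper above is a plain decorator that only adds progress reporting in next(). For intuition, a minimal in-memory implementation of the same contract might look like this sketch (illustrative code, not from Hadoop; it assumes the records are already serialized into byte arrays):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.mapred.RawKeyValueIterator;
import org.apache.hadoop.util.Progress;

public class InMemoryRawIterator implements RawKeyValueIterator {
    private final List<byte[]> keys;
    private final List<byte[]> values;
    private final DataInputBuffer keyBuf = new DataInputBuffer();
    private final DataInputBuffer valBuf = new DataInputBuffer();
    private final Progress progress = new Progress();
    private int idx = -1;

    public InMemoryRawIterator(List<byte[]> keys, List<byte[]> values) {
        this.keys = keys;
        this.values = values;
    }

    public boolean next() throws IOException {
        if (idx + 1 >= keys.size()) {
            return false;
        }
        idx++;
        // rewind the reusable buffers over the current record's bytes
        keyBuf.reset(keys.get(idx), keys.get(idx).length);
        valBuf.reset(values.get(idx), values.get(idx).length);
        progress.set((float) (idx + 1) / keys.size());
        return true;
    }

    public DataInputBuffer getKey() {
        return keyBuf;
    }

    public DataInputBuffer getValue() {
        return valBuf;
    }

    public Progress getProgress() {
        return progress;
    }

    public void close() {
    }
}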

Example 65 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the hadoop project by Apache.

From the class TestViewfsFileStatus, method testFileStatusSerialziation:

@Test
public void testFileStatusSerialziation() throws IOException, URISyntaxException {
    String testfilename = "testFileStatusSerialziation";
    TEST_DIR.mkdirs();
    File infile = new File(TEST_DIR, testfilename);
    final byte[] content = "dingos".getBytes();
    try (FileOutputStream fos = new FileOutputStream(infile)) {
        fos.write(content);
    }
    assertEquals((long) content.length, infile.length());
    Configuration conf = new Configuration();
    ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI());
    FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    assertEquals(ViewFileSystem.class, vfs.getClass());
    FileStatus stat = vfs.getFileStatus(new Path("/foo/bar/baz", testfilename));
    assertEquals(content.length, stat.getLen());
    // check serialization/deserialization
    DataOutputBuffer dob = new DataOutputBuffer();
    stat.write(dob);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), 0, dob.getLength());
    FileStatus deSer = new FileStatus();
    deSer.readFields(dib);
    assertEquals(content.length, deSer.getLen());
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), Configuration (org.apache.hadoop.conf.Configuration), FileOutputStream (java.io.FileOutputStream), FileSystem (org.apache.hadoop.fs.FileSystem), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), File (java.io.File), Test (org.junit.Test)
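
Two details of the buffer handling above generalize: the three-argument reset(data, 0, length) bounds the read to the bytes actually written, because DataOutputBuffer.getData() returns the whole backing array rather than a right-sized copy, and both buffers can be reused across records via reset. A short sketch of that reuse (IntWritable and the class name are chosen only for illustration):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;

public class BufferReuse {
    public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        DataInputBuffer in = new DataInputBuffer();
        IntWritable w = new IntWritable();
        for (int i = 0; i < 3; i++) {
            // rewind the output buffer instead of allocating a new one
            out.reset();
            new IntWritable(i * 10).write(out);
            in.reset(out.getData(), 0, out.getLength());
            w.readFields(in);
            // prints 0, 10, 20
            System.out.println(w.get());
        }
    }
}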

Aggregations

Classes most often used together with DataInputBuffer across these examples, with usage counts:

DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 112
Test (org.junit.Test): 49
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 45
IOException (java.io.IOException): 24
Text (org.apache.hadoop.io.Text): 20
Path (org.apache.hadoop.fs.Path): 16
Configuration (org.apache.hadoop.conf.Configuration): 13
IntWritable (org.apache.hadoop.io.IntWritable): 11
Random (java.util.Random): 10
DataInputStream (java.io.DataInputStream): 9
BufferedInputStream (java.io.BufferedInputStream): 8
HashMap (java.util.HashMap): 8
DataOutputStream (java.io.DataOutputStream): 6
LongWritable (org.apache.hadoop.io.LongWritable): 6
SerializationFactory (org.apache.hadoop.io.serializer.SerializationFactory): 6
IFile (org.apache.tez.runtime.library.common.sort.impl.IFile): 6
BufferedOutputStream (java.io.BufferedOutputStream): 5
BytesWritable (org.apache.hadoop.io.BytesWritable): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
Credentials (org.apache.hadoop.security.Credentials): 4