Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestYARNTokenIdentifier, method testAMRMTokenIdentifier.
@Test
public void testAMRMTokenIdentifier() throws IOException {
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1);
  int masterKeyId = 1;
  AMRMTokenIdentifier token = new AMRMTokenIdentifier(appAttemptId, masterKeyId);
  AMRMTokenIdentifier anotherToken = new AMRMTokenIdentifier();
  byte[] tokenContent = token.getBytes();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenContent, tokenContent.length);
  anotherToken.readFields(dib);
  // verify that the deserialized record equals the original record
  Assert.assertEquals("Token is not the same after serialization and deserialization.",
      token, anotherToken);
  Assert.assertEquals("ApplicationAttemptId from proto is not the same with original token",
      anotherToken.getApplicationAttemptId(), appAttemptId);
  Assert.assertEquals("masterKeyId from proto is not the same with original token",
      anotherToken.getKeyId(), masterKeyId);
}
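This getBytes()/reset()/readFields() round trip is not specific to AMRMTokenIdentifier: it works for any Writable-style record, because DataInputBuffer.reset() simply points the buffer at an existing byte array without copying. A minimal, self-contained sketch of the same pattern follows; the roundTrip helper and the use of Text as the sample Writable are illustrative choices, not part of the Hadoop tests.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class WritableRoundTrip {
  // Serialize src with write(), then rehydrate dst from the raw bytes.
  static <T extends Writable> T roundTrip(T src, T dst) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    src.write(out);
    DataInputBuffer in = new DataInputBuffer();
    // reset() points the buffer at the serialized bytes without copying.
    in.reset(out.getData(), out.getLength());
    dst.readFields(in);
    return dst;
  }

  public static void main(String[] args) throws IOException {
    Text copy = roundTrip(new Text("dingos"), new Text());
    System.out.println(copy); // prints "dingos"
  }
}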
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestYARNTokenIdentifier, method testTimelineDelegationTokenIdentifier.
@Test
public void testTimelineDelegationTokenIdentifier() throws IOException {
  Text owner = new Text("user1");
  Text renewer = new Text("user2");
  Text realUser = new Text("user3");
  long issueDate = 1;
  long maxDate = 2;
  int sequenceNumber = 3;
  int masterKeyId = 4;
  TimelineDelegationTokenIdentifier token =
      new TimelineDelegationTokenIdentifier(owner, renewer, realUser);
  token.setIssueDate(issueDate);
  token.setMaxDate(maxDate);
  token.setSequenceNumber(sequenceNumber);
  token.setMasterKeyId(masterKeyId);
  TimelineDelegationTokenIdentifier anotherToken = new TimelineDelegationTokenIdentifier();
  byte[] tokenContent = token.getBytes();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenContent, tokenContent.length);
  anotherToken.readFields(dib);
  // verify that the deserialized record equals the original record
  Assert.assertEquals("Token is not the same after serialization and deserialization.",
      token, anotherToken);
  Assert.assertEquals("owner from proto is not the same with original token",
      anotherToken.getOwner(), owner);
  Assert.assertEquals("renewer from proto is not the same with original token",
      anotherToken.getRenewer(), renewer);
  Assert.assertEquals("realUser from proto is not the same with original token",
      anotherToken.getRealUser(), realUser);
  Assert.assertEquals("issueDate from proto is not the same with original token",
      anotherToken.getIssueDate(), issueDate);
  Assert.assertEquals("maxDate from proto is not the same with original token",
      anotherToken.getMaxDate(), maxDate);
  Assert.assertEquals("sequenceNumber from proto is not the same with original token",
      anotherToken.getSequenceNumber(), sequenceNumber);
  Assert.assertEquals("masterKeyId from proto is not the same with original token",
      anotherToken.getMasterKeyId(), masterKeyId);
}
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestPBRecordImpl, method testLocalResourceStatusSerDe.
@Test(timeout = 10000)
public void testLocalResourceStatusSerDe() throws Exception {
  LocalResourceStatus rsrcS = createLocalResourceStatus();
  assertTrue(rsrcS instanceof LocalResourceStatusPBImpl);
  LocalResourceStatusPBImpl rsrcPb = (LocalResourceStatusPBImpl) rsrcS;
  DataOutputBuffer out = new DataOutputBuffer();
  rsrcPb.getProto().writeDelimitedTo(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  LocalResourceStatusProto rsrcPbD = LocalResourceStatusProto.parseDelimitedFrom(in);
  assertNotNull(rsrcPbD);
  LocalResourceStatus rsrcD = new LocalResourceStatusPBImpl(rsrcPbD);
  assertEquals(rsrcS, rsrcD);
  assertEquals(createResource(), rsrcS.getResource());
  assertEquals(createResource(), rsrcD.getResource());
}
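DataOutputBuffer and DataInputBuffer are ordinary OutputStream/InputStream subclasses, which is why protobuf's length-delimited framing (writeDelimitedTo/parseDelimitedFrom) can run over them directly, as the test above shows. A hedged sketch of the same framing with several messages sharing one buffer; it assumes YARN's generated ApplicationIdProto message, but any generated protobuf class would do.

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;

public class DelimitedProtoRoundTrip {
  public static void main(String[] args) throws Exception {
    ApplicationIdProto appId = ApplicationIdProto.newBuilder()
        .setClusterTimestamp(1L)
        .setId(1)
        .build();
    // DataOutputBuffer is a plain OutputStream, so protobuf's
    // length-delimited framing can write straight into it.
    DataOutputBuffer out = new DataOutputBuffer();
    appId.writeDelimitedTo(out);
    appId.writeDelimitedTo(out); // two framed records in one buffer
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), 0, out.getLength());
    // Each parseDelimitedFrom() consumes exactly one framed message.
    System.out.println(ApplicationIdProto.parseDelimitedFrom(in).getId());
    System.out.println(ApplicationIdProto.parseDelimitedFrom(in).getId());
  }
}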
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class ReduceTask, method runNewReducer.
@SuppressWarnings("unchecked")
private <INKEY, INVALUE, OUTKEY, OUTVALUE> void runNewReducer(JobConf job,
    final TaskUmbilicalProtocol umbilical, final TaskReporter reporter,
    RawKeyValueIterator rIter, RawComparator<INKEY> comparator,
    Class<INKEY> keyClass, Class<INVALUE> valueClass)
    throws IOException, InterruptedException, ClassNotFoundException {
  // wrap value iterator to report progress.
  final RawKeyValueIterator rawIter = rIter;
  rIter = new RawKeyValueIterator() {
    public void close() throws IOException {
      rawIter.close();
    }

    public DataInputBuffer getKey() throws IOException {
      return rawIter.getKey();
    }

    public Progress getProgress() {
      return rawIter.getProgress();
    }

    public DataInputBuffer getValue() throws IOException {
      return rawIter.getValue();
    }

    public boolean next() throws IOException {
      boolean ret = rawIter.next();
      reporter.setProgress(rawIter.getProgress().getProgress());
      return ret;
    }
  };
  // make a task context so we can get the classes
  org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
      new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job, getTaskID(), reporter);
  // make a reducer
  org.apache.hadoop.mapreduce.Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE> reducer =
      (org.apache.hadoop.mapreduce.Reducer<INKEY, INVALUE, OUTKEY, OUTVALUE>)
          ReflectionUtils.newInstance(taskContext.getReducerClass(), job);
  org.apache.hadoop.mapreduce.RecordWriter<OUTKEY, OUTVALUE> trackedRW =
      new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(this, taskContext);
  job.setBoolean("mapred.skip.on", isSkipping());
  job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
  org.apache.hadoop.mapreduce.Reducer.Context reducerContext =
      createReduceContext(reducer, job, getTaskID(), rIter, reduceInputKeyCounter,
          reduceInputValueCounter, trackedRW, committer, reporter, comparator,
          keyClass, valueClass);
  try {
    reducer.run(reducerContext);
  } finally {
    trackedRW.close(reducerContext);
  }
}
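The wrapped iterator still hands keys and values back as raw DataInputBuffers; turning those bytes into typed objects is left to the reduce context, which uses Hadoop's serialization framework. A simplified sketch of that decoding step, loosely modeled on what ReduceContextImpl does internally; decodeKey is an illustrative helper name, not a Hadoop API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;

public class RawKeyDecode {
  // Decode one raw key buffer into a typed object, much as the reduce
  // context does before handing keys to the user's Reducer.
  static <K> K decodeKey(Configuration conf, Class<K> keyClass,
      DataInputBuffer rawKey) throws IOException {
    SerializationFactory factory = new SerializationFactory(conf);
    Deserializer<K> keyDeserializer = factory.getDeserializer(keyClass);
    // DataInputBuffer is an InputStream positioned at the raw key bytes.
    keyDeserializer.open(rawKey);
    try {
      return keyDeserializer.deserialize(null);
    } finally {
      keyDeserializer.close();
    }
  }
}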
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestViewfsFileStatus, method testFileStatusSerialziation.
@Test
public void testFileStatusSerialziation() throws IOException, URISyntaxException {
  String testfilename = "testFileStatusSerialziation";
  TEST_DIR.mkdirs();
  File infile = new File(TEST_DIR, testfilename);
  final byte[] content = "dingos".getBytes();
  FileOutputStream fos = null;
  try {
    fos = new FileOutputStream(infile);
    fos.write(content);
  } finally {
    if (fos != null) {
      fos.close();
    }
  }
  assertEquals((long) content.length, infile.length());
  Configuration conf = new Configuration();
  ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI());
  FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  assertEquals(ViewFileSystem.class, vfs.getClass());
  FileStatus stat = vfs.getFileStatus(new Path("/foo/bar/baz", testfilename));
  assertEquals(content.length, stat.getLen());
  // check serialization/deserialization
  DataOutputBuffer dob = new DataOutputBuffer();
  stat.write(dob);
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), 0, dob.getLength());
  FileStatus deSer = new FileStatus();
  deSer.readFields(dib);
  assertEquals(content.length, deSer.getLen());
}