Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
In the class TestJspHelper, the method testReadWriteReplicaState.
@Test
public void testReadWriteReplicaState() {
  try {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    for (HdfsServerConstants.ReplicaState repState : HdfsServerConstants.ReplicaState.values()) {
      // Serialize the state, then read it back through the shared buffers.
      repState.write(out);
      in.reset(out.getData(), out.getLength());
      HdfsServerConstants.ReplicaState result = HdfsServerConstants.ReplicaState.read(in);
      assertTrue("testReadWrite error !!!", repState == result);
      // Clear both buffers before the next state.
      out.reset();
      in.reset();
    }
  } catch (Exception ex) {
    fail("testReadWrite ex error ReplicaState");
  }
}
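The pattern above generalizes to any Writable: write into a DataOutputBuffer, hand its backing array to DataInputBuffer.reset, and read back into a fresh instance. A minimal standalone sketch (the class name and the value 42 are illustrative, not part of the Hadoop source):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;

public class WritableRoundTrip {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    IntWritable written = new IntWritable(42);
    IntWritable read = new IntWritable();
    written.write(out);                        // serialize into the byte buffer
    in.reset(out.getData(), out.getLength());  // point the input at those bytes
    read.readFields(in);                       // deserialize into a fresh instance
    System.out.println(written.get() == read.get());  // true
    out.reset();                               // both buffers are reusable
  }
}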
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
In the class TestYARNTokenIdentifier, the method testNMTokenIdentifier.
@Test
public void testNMTokenIdentifier() throws IOException {
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1);
  NodeId nodeId = NodeId.newInstance("host0", 0);
  String applicationSubmitter = "usr0";
  int masterKeyId = 1;
  NMTokenIdentifier token = new NMTokenIdentifier(appAttemptId, nodeId, applicationSubmitter, masterKeyId);
  NMTokenIdentifier anotherToken = new NMTokenIdentifier();
  byte[] tokenContent = token.getBytes();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenContent, tokenContent.length);
  anotherToken.readFields(dib);
  // verify the whole record equals with original record
  Assert.assertEquals("Token is not the same after serialization and deserialization.", token, anotherToken);
  // verify all properties are the same as original
  Assert.assertEquals("appAttemptId from proto is not the same with original token", anotherToken.getApplicationAttemptId(), appAttemptId);
  Assert.assertEquals("NodeId from proto is not the same with original token", anotherToken.getNodeId(), nodeId);
  Assert.assertEquals("applicationSubmitter from proto is not the same with original token", anotherToken.getApplicationSubmitter(), applicationSubmitter);
  Assert.assertEquals("masterKeyId from proto is not the same with original token", anotherToken.getKeyId(), masterKeyId);
}
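Because every TokenIdentifier implements Writable, the reset-and-readFields steps above could be factored into a generic helper. A hypothetical sketch (the class and method names are ours, not part of Hadoop):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.token.TokenIdentifier;

final class TokenBytes {
  // Hypothetical helper: populate a blank identifier from serialized bytes.
  static <T extends TokenIdentifier> T fromBytes(byte[] bytes, T blank) throws IOException {
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(bytes, bytes.length);
    blank.readFields(dib);
    return blank;
  }
}

With it, the deserialization in the test would shrink to a single call: TokenBytes.fromBytes(token.getBytes(), new NMTokenIdentifier()).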
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
In the class TestWritableJobConf, the method serDeser.
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer = factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer = factory.getDeserializer(GenericsUtil.getClass(conf));
  // Serialize into an in-memory buffer ...
  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(conf);
  serializer.close();
  // ... then deserialize a fresh copy from the same bytes.
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
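Assuming CONF is the test class's shared Configuration field, a caller exercises serDeser roughly like this (the test name and property are illustrative, not from the Hadoop source):

@Test
public void testConfRoundTrip() throws Exception {
  JobConf conf = new JobConf();
  conf.set("example.key", "example.value");  // illustrative property
  // Round-trip through the configured serialization framework.
  JobConf copy = serDeser(conf);
  assertEquals("example.value", copy.get("example.key"));
}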
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
In the class TestSequenceFileAsBinaryInputFormat, the method testBinary.
@Test
public void testBinary() throws IOException {
  JobConf job = new JobConf();
  FileSystem fs = FileSystem.getLocal(job);
  Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
  Path file = new Path(dir, "testbinary.seq");
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  fs.delete(dir, true);
  FileInputFormat.setInputPaths(job, dir);
  Text tkey = new Text();
  Text tval = new Text();
  SequenceFile.Writer writer = new SequenceFile.Writer(fs, job, file, Text.class, Text.class);
  try {
    // Write RECORDS random key/value pairs from a seeded generator.
    for (int i = 0; i < RECORDS; ++i) {
      tkey.set(Integer.toString(r.nextInt(), 36));
      tval.set(Long.toString(r.nextLong(), 36));
      writer.append(tkey, tval);
    }
  } finally {
    writer.close();
  }
  InputFormat<BytesWritable, BytesWritable> bformat = new SequenceFileAsBinaryInputFormat();
  int count = 0;
  // Reseed so the same random sequence can be regenerated for comparison.
  r.setSeed(seed);
  BytesWritable bkey = new BytesWritable();
  BytesWritable bval = new BytesWritable();
  Text cmpkey = new Text();
  Text cmpval = new Text();
  DataInputBuffer buf = new DataInputBuffer();
  final int NUM_SPLITS = 3;
  FileInputFormat.setInputPaths(job, file);
  for (InputSplit split : bformat.getSplits(job, NUM_SPLITS)) {
    RecordReader<BytesWritable, BytesWritable> reader = bformat.getRecordReader(split, job, Reporter.NULL);
    try {
      while (reader.next(bkey, bval)) {
        tkey.set(Integer.toString(r.nextInt(), 36));
        tval.set(Long.toString(r.nextLong(), 36));
        // Decode the raw bytes back into Text and compare.
        buf.reset(bkey.getBytes(), bkey.getLength());
        cmpkey.readFields(buf);
        buf.reset(bval.getBytes(), bval.getLength());
        cmpval.readFields(buf);
        assertTrue("Keys don't match: *" + cmpkey.toString() + ":" + tkey.toString() + "*", cmpkey.toString().equals(tkey.toString()));
        assertTrue("Vals don't match: *" + cmpval.toString() + ":" + tval.toString() + "*", cmpval.toString().equals(tval.toString()));
        ++count;
      }
    } finally {
      reader.close();
    }
  }
  assertEquals("Some records not found", RECORDS, count);
}
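The reset/readFields pairing inside the loop is the standard way to decode a BytesWritable payload back into a typed Writable; note that it passes getLength(), because the array returned by getBytes() may be padded past the valid data. A hypothetical helper capturing that step (the class and method names are ours):

import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Writable;

final class BinaryDecode {
  // Hypothetical helper: decode a raw BytesWritable payload into `target`.
  static <W extends Writable> W decode(BytesWritable raw, W target) throws IOException {
    DataInputBuffer buf = new DataInputBuffer();
    // getLength(), not getBytes().length: the backing array may be larger
    // than the valid region.
    buf.reset(raw.getBytes(), raw.getLength());
    target.readFields(buf);
    return target;
  }
}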
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
In the class TestSequenceFileAsBinaryOutputFormat, the method testBinary.
@Test
public void testBinary() throws IOException {
  JobConf job = new JobConf();
  FileSystem fs = FileSystem.getLocal(job);
  Path dir = new Path(new Path(new Path(System.getProperty("test.build.data", ".")), FileOutputCommitter.TEMP_DIR_NAME), "_" + attempt);
  Path file = new Path(dir, "testbinary.seq");
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  fs.delete(dir, true);
  if (!fs.mkdirs(dir)) {
    fail("Failed to create output directory");
  }
  job.set(JobContext.TASK_ATTEMPT_ID, attempt);
  FileOutputFormat.setOutputPath(job, dir.getParent().getParent());
  FileOutputFormat.setWorkOutputPath(job, dir);
  SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class);
  SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class);
  SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
  SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
  BytesWritable bkey = new BytesWritable();
  BytesWritable bval = new BytesWritable();
  RecordWriter<BytesWritable, BytesWritable> writer = new SequenceFileAsBinaryOutputFormat().getRecordWriter(fs, job, file.toString(), Reporter.NULL);
  IntWritable iwritable = new IntWritable();
  DoubleWritable dwritable = new DoubleWritable();
  DataOutputBuffer outbuf = new DataOutputBuffer();
  LOG.info("Creating data by SequenceFileAsBinaryOutputFormat");
  try {
    for (int i = 0; i < RECORDS; ++i) {
      // Serialize each key and value into raw bytes before writing.
      iwritable = new IntWritable(r.nextInt());
      iwritable.write(outbuf);
      bkey.set(outbuf.getData(), 0, outbuf.getLength());
      outbuf.reset();
      dwritable = new DoubleWritable(r.nextDouble());
      dwritable.write(outbuf);
      bval.set(outbuf.getData(), 0, outbuf.getLength());
      outbuf.reset();
      writer.write(bkey, bval);
    }
  } finally {
    writer.close(Reporter.NULL);
  }
  InputFormat<IntWritable, DoubleWritable> iformat = new SequenceFileInputFormat<IntWritable, DoubleWritable>();
  int count = 0;
  // Reseed so the same random sequence can be regenerated for comparison.
  r.setSeed(seed);
  DataInputBuffer buf = new DataInputBuffer();
  final int NUM_SPLITS = 3;
  SequenceFileInputFormat.addInputPath(job, file);
  LOG.info("Reading data by SequenceFileInputFormat");
  for (InputSplit split : iformat.getSplits(job, NUM_SPLITS)) {
    RecordReader<IntWritable, DoubleWritable> reader = iformat.getRecordReader(split, job, Reporter.NULL);
    try {
      int sourceInt;
      double sourceDouble;
      while (reader.next(iwritable, dwritable)) {
        sourceInt = r.nextInt();
        sourceDouble = r.nextDouble();
        assertEquals("Keys don't match: *" + iwritable.get() + ":" + sourceInt + "*", sourceInt, iwritable.get());
        assertTrue("Vals don't match: *" + dwritable.get() + ":" + sourceDouble + "*", Double.compare(dwritable.get(), sourceDouble) == 0);
        ++count;
      }
    } finally {
      reader.close();
    }
  }
  assertEquals("Some records not found", RECORDS, count);
}
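The write side is the mirror image of the decoding shown earlier: serialize the typed Writable into a DataOutputBuffer and copy only the valid region into the BytesWritable, exactly as the loop above does with outbuf. A hypothetical sketch (the class and method names are ours):

import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

final class BinaryEncode {
  // Hypothetical helper: serialize `source` into `dest`'s byte payload.
  static void encode(Writable source, BytesWritable dest) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    source.write(out);
    // Copy only the valid region [0, getLength()) of the reusable buffer.
    dest.set(out.getData(), 0, out.getLength());
  }
}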