Use of org.apache.hadoop.io.Writable in project hadoop by apache: the class TestJoinTupleWritable, method testSparseWideWritable.
/**
 * Tests a tuple writable with more than 64 values and the values set written
 * spread far apart.
 */
@Test
public void testSparseWideWritable() throws Exception {
  Writable[] manyWrits = makeRandomWritables(131);
  TupleWritable sTuple = new TupleWritable(manyWrits);
  for (int i = 0; i < manyWrits.length; i++) {
    if (i % 65 == 0) {
      sTuple.setWritten(i);
    }
  }
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  sTuple.write(new DataOutputStream(out));
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(in));
  assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
  assertEquals("All tuple data has not been read from the stream",
      -1, in.read());
}
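The i % 65 spacing is the point of this test: TupleWritable tracks which positions were written in a bit set, and marking indices 0, 65, and 130 forces that set to span multiple 64-bit words. A minimal standalone sketch with java.util.BitSet (our illustration, not part of the Hadoop test) shows the spread:

import java.util.BitSet;

public class SparseBitsDemo {
  public static void main(String[] args) {
    // Mirror the test's loop: mark every 65th of 131 positions.
    BitSet written = new BitSet(131);
    for (int i = 0; i < 131; i++) {
      if (i % 65 == 0) {
        written.set(i);
      }
    }
    System.out.println(written);                      // {0, 65, 130}
    // Bit 130 lives in the third 64-bit word, so a single
    // long bitmask could not have represented this tuple.
    System.out.println(written.toLongArray().length); // 3
  }
}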
Use of org.apache.hadoop.io.Writable in project hadoop by apache: the class TestJoinTupleWritable, method testNestedIterable.
@Test
public void testNestedIterable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
      new BooleanWritable(r.nextBoolean()),
      new FloatWritable(r.nextFloat()),
      new FloatWritable(r.nextFloat()),
      new IntWritable(r.nextInt()),
      new LongWritable(r.nextLong()),
      new BytesWritable("dingo".getBytes()),
      new LongWritable(r.nextLong()),
      new IntWritable(r.nextInt()),
      new BytesWritable("yak".getBytes()),
      new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
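verifIter is a helper of the test class (not shown here) that walks the tuple and recurses into any nested TupleWritable it finds. A minimal sketch of that idea, assuming it sits in the same package as TupleWritable so the package-private setWritten is visible, as the test itself does; countLeaves and testCountNestedLeaves are our illustrative names:

static int countLeaves(TupleWritable tuple) {
  int n = 0;
  // The iterator yields only positions that were marked written.
  for (Writable w : tuple) {
    if (w instanceof TupleWritable) {
      n += countLeaves((TupleWritable) w); // recurse into nested tuples
    } else {
      n++;
    }
  }
  return n;
}

@Test
public void testCountNestedLeaves() {
  TupleWritable inner = new TupleWritable(
      new Writable[] { new IntWritable(1), new IntWritable(2) });
  inner.setWritten(0);
  inner.setWritten(1);
  TupleWritable outer = new TupleWritable(
      new Writable[] { new IntWritable(0), inner });
  outer.setWritten(0);
  outer.setWritten(1);
  assertEquals("Two inner leaves plus one in the outer tuple",
      3, countLeaves(outer));
}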
Use of org.apache.hadoop.io.Writable in project hadoop by apache: the class TestJoinTupleWritable, method testWideWritable2.
@Test
public void testWideWritable2() throws Exception {
  Writable[] manyWrits = makeRandomWritables(71);
  TupleWritable sTuple = new TupleWritable(manyWrits);
  for (int i = 0; i < manyWrits.length; i++) {
    sTuple.setWritten(i);
  }
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  sTuple.write(new DataOutputStream(out));
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(in));
  assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
  assertEquals("All tuple data has not been read from the stream",
      -1, in.read());
}
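Beyond whole-tuple equality, TupleWritable's public has(int) and get(int) let a test pin down exactly which position broke. A hedged companion check (our own sketch, not part of the Hadoop suite), reusing the test class's makeRandomWritables helper and its package-private access to setWritten:

@Test
public void testWrittenFlagsSurviveRoundTrip() throws Exception {
  Writable[] manyWrits = makeRandomWritables(71); // helper from this test class
  TupleWritable sTuple = new TupleWritable(manyWrits);
  for (int i = 0; i < manyWrits.length; i++) {
    sTuple.setWritten(i);
  }
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  sTuple.write(new DataOutputStream(out));
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(
      new ByteArrayInputStream(out.toByteArray())));
  // Verify each position individually instead of relying on equals().
  for (int i = 0; i < manyWrits.length; i++) {
    assertTrue("Position " + i + " lost its written flag", dTuple.has(i));
    assertEquals("Value mismatch at position " + i,
        sTuple.get(i), dTuple.get(i));
  }
}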
Use of org.apache.hadoop.io.Writable in project hadoop by apache: the class TestJoinTupleWritable, method testWritable.
@Test
public void testWritable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
      new BooleanWritable(r.nextBoolean()),
      new FloatWritable(r.nextFloat()),
      new FloatWritable(r.nextFloat()),
      new IntWritable(r.nextInt()),
      new LongWritable(r.nextLong()),
      new BytesWritable("dingo".getBytes()),
      new LongWritable(r.nextLong()),
      new IntWritable(r.nextInt()),
      new BytesWritable("yak".getBytes()),
      new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  sTuple.write(new DataOutputStream(out));
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(in));
  assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
}
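The serialization tests above all repeat the same write/readFields round trip through in-memory byte streams. A minimal sketch of that pattern factored into a reusable helper; WritableRoundTrip and roundTrip are our names, not Hadoop's:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

public final class WritableRoundTrip {

  private WritableRoundTrip() {
  }

  /** Serializes src into a byte buffer, then deserializes into dst. */
  public static <T extends Writable> T roundTrip(Writable src, T dst)
      throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    src.write(new DataOutputStream(out));
    dst.readFields(new DataInputStream(
        new ByteArrayInputStream(out.toByteArray())));
    return dst;
  }
}

With this helper, the core assertion of testWritable reduces to assertEquals(sTuple, roundTrip(sTuple, new TupleWritable())).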
Use of org.apache.hadoop.io.Writable in project hadoop by apache: the class TestJoinProperties, method testFormat.
@SuppressWarnings("unchecked")
public int testFormat(Configuration conf, int tupleSize, boolean firstTuple,
    boolean secondTuple, TestType ttype) throws Exception {
  Job job = Job.getInstance(conf);
  CompositeInputFormat format = new CompositeInputFormat();
  int count = 0;
  for (InputSplit split : (List<InputSplit>) format.getSplits(job)) {
    TaskAttemptContext context =
        MapReduceTestUtil.createDummyMapTaskAttemptContext(conf);
    RecordReader reader = format.createRecordReader(split, context);
    MapContext mcontext = new MapContextImpl(conf, context.getTaskAttemptID(),
        reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
    reader.initialize(split, mcontext);
    WritableComparable key = null;
    Writable value = null;
    while (reader.nextKeyValue()) {
      key = (WritableComparable) reader.getCurrentKey();
      value = (Writable) reader.getCurrentValue();
      validateKeyValue(key, value, tupleSize, firstTuple, secondTuple, ttype);
      count++;
    }
  }
  return count;
}
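getSplits only works here because the test's setup (not shown) has already stored a join expression in the configuration. A minimal sketch of that wiring, assuming the mapreduce API's CompositeInputFormat.JOIN_EXPR key and compose helper; the class name and paths are illustrative, and the real test writes its own temporary directories:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat;

public class JoinConfSketch {

  /** Builds a Job whose CompositeInputFormat joins two sources. */
  public static Job composedJob() throws Exception {
    Configuration conf = new Configuration();
    Path src1 = new Path("/data/join/src1"); // illustrative paths
    Path src2 = new Path("/data/join/src2");
    // "outer", "inner", and "override" are the supported join operations.
    conf.set(CompositeInputFormat.JOIN_EXPR,
        CompositeInputFormat.compose("outer", SequenceFileInputFormat.class,
            src1, src2));
    return Job.getInstance(conf);
  }
}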