Use of org.apache.hadoop.io.Writable in project hadoop by apache.
The class TestTupleWritable, method testPreVersion21CompatibilityEmptyTuple.
@Test
public void testPreVersion21CompatibilityEmptyTuple() throws Exception {
  Writable[] manyWrits = new Writable[0];
  PreVersion21TupleWritable oldTuple = new PreVersion21TupleWritable(manyWrits);
  // don't set any values written
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  oldTuple.write(new DataOutputStream(out));
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(in));
  assertTrue("Tuple writable is unable to read pre-0.21 versions of TupleWritable",
      oldTuple.isCompatible(dTuple));
  assertEquals("All tuple data has not been read from the stream", -1, in.read());
}
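This test relies on the standard Writable round trip: write(DataOutput) into an in-memory buffer, then readFields(DataInput) on a fresh instance. A minimal sketch of that idiom, shown here with Text as a stand-in type (the helper class and method names are ours):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class WritableRoundTrip {
  // Serialize src to bytes, then populate dst from those same bytes.
  public static <T extends Writable> T roundTrip(T src, T dst) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    src.write(new DataOutputStream(out));
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
    dst.readFields(in);
    return dst;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(roundTrip(new Text("hello"), new Text()));  // hello
  }
}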
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
The class DumpTypedBytes, method dumpTypedBytes.
/**
* Dump given list of files to standard output as typed bytes.
*/
@SuppressWarnings("unchecked")
private int dumpTypedBytes(List<FileStatus> files) throws IOException {
  JobConf job = new JobConf(getConf());
  DataOutputStream dout = new DataOutputStream(System.out);
  AutoInputFormat autoInputFormat = new AutoInputFormat();
  for (FileStatus fileStatus : files) {
    // A split length of len * blockSize comfortably covers the whole file,
    // so each file is read as a single split.
    FileSplit split = new FileSplit(fileStatus.getPath(), 0,
        fileStatus.getLen() * fileStatus.getBlockSize(), (String[]) null);
    RecordReader recReader = null;
    try {
      recReader = autoInputFormat.getRecordReader(split, job, Reporter.NULL);
      Object key = recReader.createKey();
      Object value = recReader.createValue();
      while (recReader.next(key, value)) {
        // Writables go through the Writable-aware serializer; anything else
        // falls back to generic typed bytes output.
        if (key instanceof Writable) {
          TypedBytesWritableOutput.get(dout).write((Writable) key);
        } else {
          TypedBytesOutput.get(dout).write(key);
        }
        if (value instanceof Writable) {
          TypedBytesWritableOutput.get(dout).write((Writable) value);
        } else {
          TypedBytesOutput.get(dout).write(value);
        }
      }
    } finally {
      if (recReader != null) {
        recReader.close();
      }
    }
  }
  dout.flush();
  return 0;
}
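DumpTypedBytes implements Tool, so this method is normally reached through ToolRunner. A hedged sketch of driving it programmatically; the input path is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.streaming.DumpTypedBytes;
import org.apache.hadoop.util.ToolRunner;

public class DumpTypedBytesDriver {
  public static void main(String[] args) throws Exception {
    // ToolRunner handles generic options (-D, -fs, ...) before delegating
    // to DumpTypedBytes.run(), which resolves the given path glob and
    // dumps the matching files as typed bytes on standard output.
    int rc = ToolRunner.run(new Configuration(), new DumpTypedBytes(),
        new String[] { "/user/example/input" });  // placeholder path
    System.exit(rc);
  }
}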
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
The class TypedBytesWritableInput, method readArray.
public ArrayWritable readArray(ArrayWritable aw) throws IOException {
  if (aw == null) {
    aw = new ArrayWritable(TypedBytesWritable.class);
  } else if (!aw.getValueClass().equals(TypedBytesWritable.class)) {
    throw new RuntimeException("value class has to be TypedBytesWritable");
  }
  int length = in.readVectorHeader();
  Writable[] writables = new Writable[length];
  for (int i = 0; i < length; i++) {
    writables[i] = new TypedBytesWritable(in.readRaw());
  }
  aw.set(writables);
  return aw;
}
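A hedged round-trip sketch for readArray: write an ArrayWritable of TypedBytesWritable elements with writeArray, then read it back through read(), which consumes the leading VECTOR type code before delegating to the readArray shown above (the scaffolding class is ours; the typedbytes classes come from hadoop-streaming):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.typedbytes.TypedBytesWritable;
import org.apache.hadoop.typedbytes.TypedBytesWritableInput;
import org.apache.hadoop.typedbytes.TypedBytesWritableOutput;

public class TypedBytesArrayRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    TypedBytesWritableOutput out =
        TypedBytesWritableOutput.get(new DataOutputStream(buf));

    TypedBytesWritable elem = new TypedBytesWritable();
    elem.setValue(42);  // stores the value in its typed bytes encoding
    ArrayWritable src = new ArrayWritable(TypedBytesWritable.class);
    src.set(new Writable[] { elem });
    out.writeArray(src);  // vector header, then the elements

    TypedBytesWritableInput in = TypedBytesWritableInput.get(
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
    ArrayWritable copy = (ArrayWritable) in.read();  // VECTOR -> readArray
    System.out.println(((TypedBytesWritable) copy.get()[0]).getValue());  // 42
  }
}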
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
The class TypedBytesWritableInput, method readMap.
public MapWritable readMap(MapWritable mw) throws IOException {
  if (mw == null) {
    mw = new MapWritable();
  }
  int length = in.readMapHeader();
  for (int i = 0; i < length; i++) {
    Writable key = read();
    Writable value = read();
    mw.put(key, value);
  }
  return mw;
}
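The same pattern round-trips a map: writeMap emits a map header followed by alternating keys and values, and read() consumes the MAP type code before delegating to the readMap shown above. A hedged sketch (the scaffolding class is ours):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.typedbytes.TypedBytesWritableInput;
import org.apache.hadoop.typedbytes.TypedBytesWritableOutput;

public class TypedBytesMapRoundTrip {
  public static void main(String[] args) throws IOException {
    MapWritable src = new MapWritable();
    src.put(new Text("answer"), new IntWritable(42));

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    TypedBytesWritableOutput.get(new DataOutputStream(buf)).writeMap(src);

    TypedBytesWritableInput in = TypedBytesWritableInput.get(
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
    MapWritable copy = (MapWritable) in.read();  // MAP -> readMap
    System.out.println(copy.get(new Text("answer")));  // 42
  }
}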
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
The class TypedBytesWritableInput, method readSortedMap.
public <K extends WritableComparable<? super K>> SortedMapWritable<K> readSortedMap(
    SortedMapWritable<K> mw) throws IOException {
  if (mw == null) {
    mw = new SortedMapWritable<K>();
  }
  int length = in.readMapHeader();
  for (int i = 0; i < length; i++) {
    @SuppressWarnings("unchecked")
    K key = (K) read();
    Writable value = read();
    mw.put(key, value);
  }
  return mw;
}
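readSortedMap consumes the same MAP wire format as readMap, as the identical readMapHeader call shows; the difference is the in-memory container, whose keys must be WritableComparable so entries stay key-ordered. A minimal sketch of that container (the values are ours):

import java.util.Map;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class SortedMapWritableExample {
  public static void main(String[] args) {
    SortedMapWritable<Text> map = new SortedMapWritable<>();
    map.put(new Text("b"), new IntWritable(2));
    map.put(new Text("a"), new IntWritable(1));
    // Backed by a sorted map, so iteration is key-ordered: a=1, then b=2.
    for (Map.Entry<Text, Writable> e : map.entrySet()) {
      System.out.println(e.getKey() + "=" + e.getValue());
    }
  }
}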