Usage example of org.apache.hadoop.io.serializer.Serialization in the Apache Hadoop project:
the Chain class, method getMapperCollector.
/**
 * Builds the {@link OutputCollector} that a chained Mapper at the given
 * position should emit to, wired with that mapper's key/value serializations.
 *
 * @param mapperIndex index of the Mapper instance to get the OutputCollector.
 * @param output the original OutputCollector of the task.
 * @param reporter the reporter of the task.
 * @return the OutputCollector to be used in the chain.
 */
@SuppressWarnings({ "unchecked" })
public OutputCollector getMapperCollector(int mapperIndex, OutputCollector output, Reporter reporter) {
    // Look up the per-mapper serializations and hand them straight to the
    // chain collector; no need for intermediate locals.
    return new ChainOutputCollector(mapperIndex,
        mappersKeySerialization.get(mapperIndex),
        mappersValueSerialization.get(mapperIndex),
        output,
        reporter);
}
Usage example of org.apache.hadoop.io.serializer.Serialization in the Apache Ignite project:
the HadoopV2TaskContext class, method readExternalSplit.
/**
 * Reads a native Hadoop input split back out of the job's split file.
 *
 * <p>Opens the job split file on the MR user's file system, seeks to the
 * offset recorded in the external split, reads the split class name, and
 * deserializes the split instance via the Hadoop serialization framework.
 *
 * @param split External split.
 * @return Native input split.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
private Object readExternalSplit(HadoopExternalSplit split) throws IgniteCheckedException {
    Path jobDir = new Path(jobConf().get(MRJobConfig.MAPREDUCE_JOB_DIR));

    FileSystem fs;

    try {
        fs = fileSystemForMrUserWithCaching(jobDir.toUri(), jobConf(), fsMap);
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }

    try (FSDataInputStream in = fs.open(JobSubmissionFiles.getJobSplitFile(jobDir))) {
        in.seek(split.offset());

        // The split file stores the split's class name immediately before
        // the serialized split data.
        String clsName = Text.readString(in);

        Class<?> cls = jobConf().getClassByName(clsName);

        assert cls != null;

        Serialization serialization = new SerializationFactory(jobConf()).getSerialization(cls);

        Deserializer deserializer = serialization.getDeserializer(cls);

        deserializer.open(in);

        Object res;

        try {
            res = deserializer.deserialize(null);
        }
        finally {
            // Bug fix: the original code skipped close() when deserialize()
            // threw, leaking the deserializer on the exception path.
            deserializer.close();
        }

        assert res != null;

        return res;
    }
    catch (IOException | ClassNotFoundException e) {
        throw new IgniteCheckedException(e);
    }
}
Aggregations