
Example 16 with RecordReader

Use of org.apache.hadoop.mapred.RecordReader in project voldemort.

From the class JsonSequenceFileInputFormat, method getRecordReader:

@Override
public RecordReader<BytesWritable, BytesWritable> getRecordReader(InputSplit split, JobConf conf, Reporter reporter) throws IOException {
    String inputPathString = ((FileSplit) split).getPath().toUri().getPath();
    log.info("Input file path: " + inputPathString);
    Path inputPath = new Path(inputPathString);
    SequenceFile.Reader reader = new SequenceFile.Reader(inputPath.getFileSystem(conf), inputPath, conf);
    SequenceFile.Metadata meta = reader.getMetadata();
    try {
        // Metadata.get() returns null when the key is absent, so guard before getLength().
        Text keySchema = meta.get(new Text("key.schema"));
        Text valueSchema = meta.get(new Text("value.schema"));
        if (keySchema == null || valueSchema == null || 0 == keySchema.getLength() || 0 == valueSchema.getLength()) {
            throw new IllegalArgumentException("Missing key.schema or value.schema in SequenceFile metadata");
        }
        // Update the JobConf with the schemas read from the file metadata.
        conf.set("mapper.input.key.schema", keySchema.toString());
        conf.set("mapper.input.value.schema", valueSchema.toString());
    } catch (Exception e) {
        throw new IOException("Failed to load schema from file: " + inputPathString, e);
    } finally {
        reader.close();
    }
    return super.getRecordReader(split, conf, reporter);
}
Also used: Path (org.apache.hadoop.fs.Path), SequenceFile (org.apache.hadoop.io.SequenceFile), RecordReader (org.apache.hadoop.mapred.RecordReader), Text (org.apache.hadoop.io.Text), IOException (java.io.IOException), FileSplit (org.apache.hadoop.mapred.FileSplit)
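
For reference, the schema lookup in the example above is just a read of the SequenceFile's metadata map. Here is a minimal standalone sketch of that step, assuming the same "key.schema"/"value.schema" metadata keys; the class name and command-line handling are illustrative, not from the project:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class ReadSequenceFileMetadata {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path inputPath = new Path(args[0]);
        FileSystem fs = inputPath.getFileSystem(conf);
        // SequenceFile.Reader exposes the key/value metadata written at file-creation time.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, inputPath, conf);
        try {
            SequenceFile.Metadata meta = reader.getMetadata();
            // These entries are only present if the writer set them, as the
            // Voldemort example expects; get() returns null otherwise.
            Text keySchema = meta.get(new Text("key.schema"));
            Text valueSchema = meta.get(new Text("value.schema"));
            System.out.println("key.schema   = " + keySchema);
            System.out.println("value.schema = " + valueSchema);
        } finally {
            reader.close();
        }
    }
}

The null return from meta.get() on a missing key is why the example guards before calling getLength().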

Example 17 with RecordReader

Use of org.apache.hadoop.mapred.RecordReader in project ignite by apache.

From the class HadoopV1MapTask, method run:

/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopJobEx job = taskCtx.job();
    HadoopV2TaskContext taskCtx0 = (HadoopV2TaskContext) taskCtx;
    // Bind this task's mapper index to thread-local state for the duration of the run.
    if (taskCtx.taskInfo().hasMapperIndex())
        HadoopMapperUtils.mapperIndex(taskCtx.taskInfo().mapperIndex());
    else
        HadoopMapperUtils.clearMapperIndex();
    try {
        JobConf jobConf = taskCtx0.jobConf();
        InputFormat inFormat = jobConf.getInputFormat();
        HadoopInputSplit split = info().inputSplit();
        InputSplit nativeSplit;
        // Convert Ignite's split descriptor into a native Hadoop InputSplit.
        if (split instanceof HadoopFileBlock) {
            HadoopFileBlock block = (HadoopFileBlock) split;
            nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), EMPTY_HOSTS);
        } else
            nativeSplit = (InputSplit) taskCtx0.getNativeSplit(split);
        assert nativeSplit != null;
        Reporter reporter = new HadoopV1Reporter(taskCtx);
        HadoopV1OutputCollector collector = null;
        try {
            // Write directly to the final output when the job has neither a combiner nor a reducer.
            collector = collector(jobConf, taskCtx0, !job.info().hasCombiner() && !job.info().hasReducer(), fileName(), taskCtx0.attemptId());
            RecordReader reader = inFormat.getRecordReader(nativeSplit, jobConf, reporter);
            Mapper mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), jobConf);
            // The old mapred API reuses a single key/value holder pair across next() calls.
            Object key = reader.createKey();
            Object val = reader.createValue();
            assert mapper != null;
            try {
                try {
                    while (reader.next(key, val)) {
                        if (isCancelled())
                            throw new HadoopTaskCancelledException("Map task cancelled.");
                        mapper.map(key, val, collector, reporter);
                    }
                    taskCtx.onMapperFinished();
                } finally {
                    mapper.close();
                }
            } finally {
                collector.closeWriter();
            }
            collector.commit();
        } catch (Exception e) {
            if (collector != null)
                collector.abort();
            throw new IgniteCheckedException(e);
        }
    } finally {
        HadoopMapperUtils.clearMapperIndex();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Reporter (org.apache.hadoop.mapred.Reporter), RecordReader (org.apache.hadoop.mapred.RecordReader), HadoopInputSplit (org.apache.ignite.hadoop.HadoopInputSplit), HadoopFileBlock (org.apache.ignite.internal.processors.hadoop.HadoopFileBlock), FileSplit (org.apache.hadoop.mapred.FileSplit), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), HadoopTaskCancelledException (org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException), Mapper (org.apache.hadoop.mapred.Mapper), HadoopJobEx (org.apache.ignite.internal.processors.hadoop.HadoopJobEx), InputFormat (org.apache.hadoop.mapred.InputFormat), JobConf (org.apache.hadoop.mapred.JobConf), InputSplit (org.apache.hadoop.mapred.InputSplit), HadoopV2TaskContext (org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext)
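
Both examples follow the same old-API consumption pattern: ask the InputFormat for a RecordReader, create reusable key/value holders, and loop on next(). A self-contained sketch of just that loop against plain text input (TextInputFormat and Reporter.NULL are standard Hadoop; the split-count hint and the command-line path are illustrative):

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

public class RecordReaderLoop {
    public static void main(String[] args) throws Exception {
        JobConf job = new JobConf();
        // Illustrative: take the input path from the command line.
        FileInputFormat.setInputPaths(job, args[0]);
        TextInputFormat format = new TextInputFormat();
        format.configure(job);
        // One split per HDFS block by default; the second argument is only a hint.
        for (InputSplit split : format.getSplits(job, 1)) {
            RecordReader<LongWritable, Text> reader = format.getRecordReader(split, job, Reporter.NULL);
            // The mapred API reuses these holders on every next() call.
            LongWritable key = reader.createKey();
            Text value = reader.createValue();
            try {
                while (reader.next(key, value)) {
                    System.out.println(key.get() + "\t" + value);
                }
            } finally {
                reader.close();
            }
        }
    }
}

The Ignite task above is this same loop with the collector and cancellation check woven in; reusing the key/value objects rather than allocating per record is what distinguishes the old mapred RecordReader from the newer mapreduce API.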

Aggregations

RecordReader (org.apache.hadoop.mapred.RecordReader): 17
Path (org.apache.hadoop.fs.Path): 9
FileSplit (org.apache.hadoop.mapred.FileSplit): 8
IOException (java.io.IOException): 7
JobConf (org.apache.hadoop.mapred.JobConf): 7
InputSplit (org.apache.hadoop.mapred.InputSplit): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 5
InputFormat (org.apache.hadoop.mapred.InputFormat): 4
Text (org.apache.hadoop.io.Text): 3
Configuration (org.apache.hadoop.conf.Configuration): 2
SerDeException (org.apache.hadoop.hive.serde2.SerDeException): 2
StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector): 2
SequenceFile (org.apache.hadoop.io.SequenceFile): 2
Reporter (org.apache.hadoop.mapred.Reporter): 2
PARTITION_KEY (com.facebook.presto.hive.HiveColumnHandle.ColumnType.PARTITION_KEY): 1
REGULAR (com.facebook.presto.hive.HiveColumnHandle.ColumnType.REGULAR): 1
HiveColumnHandle.bucketColumnHandle (com.facebook.presto.hive.HiveColumnHandle.bucketColumnHandle): 1
HiveColumnHandle.isBucketColumnHandle (com.facebook.presto.hive.HiveColumnHandle.isBucketColumnHandle): 1
HiveColumnHandle.isPathColumnHandle (com.facebook.presto.hive.HiveColumnHandle.isPathColumnHandle): 1
HiveColumnHandle.pathColumnHandle (com.facebook.presto.hive.HiveColumnHandle.pathColumnHandle): 1