
Example 16 with RecordReader

use of org.apache.hadoop.mapreduce.RecordReader in project incubator-rya by apache.

the class StatementPatternStorageTest method createStorages.

protected List<StatementPatternStorage> createStorages(String location) throws IOException, InterruptedException {
    List<StatementPatternStorage> storages = new ArrayList<StatementPatternStorage>();
    StatementPatternStorage storage = new StatementPatternStorage();
    InputFormat inputFormat = storage.getInputFormat();
    Job job = new Job(new Configuration());
    storage.setLocation(location, job);
    List<InputSplit> splits = inputFormat.getSplits(job);
    assertNotNull(splits);
    for (InputSplit inputSplit : splits) {
        storage = new StatementPatternStorage();
        job = new Job(new Configuration());
        storage.setLocation(location, job);
        TaskAttemptContext taskAttemptContext = new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID("jtid", 0, false, 0, 0));
        RecordReader recordReader = inputFormat.createRecordReader(inputSplit, taskAttemptContext);
        recordReader.initialize(inputSplit, taskAttemptContext);
        storage.prepareToRead(recordReader, null);
        storages.add(storage);
    }
    return storages;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) AccumuloRdfConfiguration(org.apache.rya.accumulo.AccumuloRdfConfiguration) InputFormat(org.apache.hadoop.mapreduce.InputFormat) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) TaskAttemptContextImpl(org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl) RecordReader(org.apache.hadoop.mapreduce.RecordReader) ArrayList(java.util.ArrayList) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapreduce.InputSplit)
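
The loop above hands each freshly initialized reader to StatementPatternStorage via prepareToRead without consuming it. If you also want to inspect what a reader emits, the records can be drained with nothing but the standard RecordReader contract. A minimal sketch of such a helper (RecordReaderDrain is a hypothetical test utility, not part of Rya):

import java.io.IOException;
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.mapreduce.RecordReader;

public final class RecordReaderDrain {

    private RecordReaderDrain() {
    }

    // Pulls every remaining key/value pair out of an already-initialized reader
    // and closes it afterwards.
    public static <K, V> List<Map.Entry<K, V>> drain(RecordReader<K, V> reader)
            throws IOException, InterruptedException {
        List<Map.Entry<K, V>> pairs = new ArrayList<>();
        try {
            while (reader.nextKeyValue()) {
                pairs.add(new SimpleEntry<>(reader.getCurrentKey(), reader.getCurrentValue()));
            }
        } finally {
            reader.close();
        }
        return pairs;
    }
}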

Example 17 with RecordReader

use of org.apache.hadoop.mapreduce.RecordReader in project incubator-rya by apache.

the class GraphXEdgeInputFormatTest method testInputFormat.

@SuppressWarnings("rawtypes")
@Test
public void testInputFormat() throws Exception {
    RyaStatement input = RyaStatement.builder().setSubject(new RyaURI("http://www.google.com")).setPredicate(new RyaURI("http://some_other_uri")).setObject(new RyaURI("http://www.yahoo.com")).setColumnVisibility(new byte[0]).setValue(new byte[0]).build();
    apiImpl.add(input);
    Job jobConf = Job.getInstance();
    GraphXEdgeInputFormat.setMockInstance(jobConf, instance.getInstanceName());
    GraphXEdgeInputFormat.setConnectorInfo(jobConf, username, password);
    GraphXEdgeInputFormat.setTableLayout(jobConf, TABLE_LAYOUT.SPO);
    GraphXEdgeInputFormat.setInputTableName(jobConf, table);
    GraphXEdgeInputFormat.setScanIsolation(jobConf, false);
    GraphXEdgeInputFormat.setLocalIterators(jobConf, false);
    GraphXEdgeInputFormat.setOfflineTableScan(jobConf, false);
    GraphXEdgeInputFormat inputFormat = new GraphXEdgeInputFormat();
    JobContext context = new JobContextImpl(jobConf.getConfiguration(), jobConf.getJobID());
    List<InputSplit> splits = inputFormat.getSplits(context);
    Assert.assertEquals(1, splits.size());
    TaskAttemptContext taskAttemptContext = new TaskAttemptContextImpl(context.getConfiguration(), new TaskAttemptID(new TaskID(), 1));
    RecordReader reader = inputFormat.createRecordReader(splits.get(0), taskAttemptContext);
    RecordReader ryaStatementRecordReader = (RecordReader) reader;
    ryaStatementRecordReader.initialize(splits.get(0), taskAttemptContext);
    List<Edge> results = new ArrayList<Edge>();
    while (ryaStatementRecordReader.nextKeyValue()) {
        Edge writable = (Edge) ryaStatementRecordReader.getCurrentValue();
        long srcId = writable.srcId();
        long destId = writable.dstId();
        RyaTypeWritable rtw = null;
        Object text = ryaStatementRecordReader.getCurrentKey();
        Edge<RyaTypeWritable> edge = new Edge<RyaTypeWritable>(srcId, destId, rtw);
        results.add(edge);
        System.out.println(text);
    }
    System.out.println(results.size());
    System.out.println(results);
    Assert.assertTrue(results.size() == 2);
}
Also used : JobContextImpl(org.apache.hadoop.mapreduce.task.JobContextImpl) TaskID(org.apache.hadoop.mapreduce.TaskID) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) RecordReader(org.apache.hadoop.mapreduce.RecordReader) ArrayList(java.util.ArrayList) RyaStatement(org.apache.rya.api.domain.RyaStatement) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) RyaURI(org.apache.rya.api.domain.RyaURI) TaskAttemptContextImpl(org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl) JobContext(org.apache.hadoop.mapreduce.JobContext) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapreduce.InputSplit) Edge(org.apache.spark.graphx.Edge) Test(org.junit.Test)
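
The test drives the InputFormat by hand: a JobContext for getSplits, then a TaskAttemptContext per split for createRecordReader and initialize. That scaffolding is generic and can live in a small reusable helper. A minimal sketch (InputFormatTestDriver is hypothetical, not part of Rya; it assumes the Configuration already carries whatever the concrete InputFormat needs, as jobConf does above):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public final class InputFormatTestDriver {

    private InputFormatTestDriver() {
    }

    // Computes the splits, reads every split with its own reader, and collects
    // all values produced by the format.
    public static <K, V> List<V> readAllValues(InputFormat<K, V> inputFormat, Configuration conf)
            throws Exception {
        JobContextImpl jobContext = new JobContextImpl(conf, new JobID("local", 0));
        List<V> values = new ArrayList<>();
        for (InputSplit split : inputFormat.getSplits(jobContext)) {
            TaskAttemptContext attemptContext =
                    new TaskAttemptContextImpl(conf, new TaskAttemptID(new TaskID(), 1));
            try (RecordReader<K, V> reader = inputFormat.createRecordReader(split, attemptContext)) {
                reader.initialize(split, attemptContext);
                while (reader.nextKeyValue()) {
                    values.add(reader.getCurrentValue());
                }
            }
        }
        return values;
    }
}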

Example 18 with RecordReader

use of org.apache.hadoop.mapreduce.RecordReader in project incubator-rya by apache.

the class AccumuloHDFSFileInputFormat method createRecordReader.

@Override
public RecordReader<Key, Value> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
    return new RecordReader<Key, Value>() {

        private FileSKVIterator fileSKVIterator;

        private boolean started = false;

        @Override
        public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
            FileSplit split = (FileSplit) inputSplit;
            Configuration job = taskAttemptContext.getConfiguration();
            Path file = split.getPath();
            FileSystem fs = file.getFileSystem(job);
            Instance instance = MRUtils.AccumuloProps.getInstance(taskAttemptContext);
            fileSKVIterator = RFileOperations.getInstance().openReader(file.toString(), ALLRANGE, new HashSet<ByteSequence>(), false, fs, job, instance.getConfiguration());
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            if (started) {
                fileSKVIterator.next();
            } else {
                // don't move past the first record yet
                started = true;
            }
            return fileSKVIterator.hasTop();
        }

        @Override
        public Key getCurrentKey() throws IOException, InterruptedException {
            return fileSKVIterator.getTopKey();
        }

        @Override
        public Value getCurrentValue() throws IOException, InterruptedException {
            return fileSKVIterator.getTopValue();
        }

        @Override
        public float getProgress() throws IOException, InterruptedException {
            return 0;
        }

        @Override
        public void close() throws IOException {
        }
    };
}
Also used : Path(org.apache.hadoop.fs.Path) FileSKVIterator(org.apache.accumulo.core.file.FileSKVIterator) Configuration(org.apache.hadoop.conf.Configuration) Instance(org.apache.accumulo.core.client.Instance) RecordReader(org.apache.hadoop.mapreduce.RecordReader) FileSystem(org.apache.hadoop.fs.FileSystem) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) FileSplit(org.apache.hadoop.mapreduce.lib.input.FileSplit) InputSplit(org.apache.hadoop.mapreduce.InputSplit) HashSet(java.util.HashSet)
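
The started flag gives the reader one-record lookahead semantics: openReader leaves the FileSKVIterator positioned on the first entry, so the first nextKeyValue() call only has to report whether a top exists, and every later call advances before checking. From the caller's side the reader behaves like any other RecordReader; a minimal consumption sketch (the reader, split, and context are assumed to come from this InputFormat's getSplits and a task or test harness carrying the Accumulo instance properties):

import java.io.IOException;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public final class RFileSplitDump {

    private RFileSplitDump() {
    }

    // Reads one split end to end and counts its entries; the first key/value
    // of the RFile is not skipped thanks to the lookahead described above.
    static long countEntries(RecordReader<Key, Value> reader, InputSplit split,
            TaskAttemptContext context) throws IOException, InterruptedException {
        reader.initialize(split, context);
        long entries = 0;
        try {
            while (reader.nextKeyValue()) {
                Key key = reader.getCurrentKey();       // current top key
                Value value = reader.getCurrentValue(); // matching value
                entries++;
            }
        } finally {
            reader.close();
        }
        return entries;
    }
}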

Example 19 with RecordReader

use of org.apache.hadoop.mapreduce.RecordReader in project carbondata by apache.

the class DistributableDataMapFormat method createRecordReader.

@Override
public RecordReader<Void, ExtendedBlocklet> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
    return new RecordReader<Void, ExtendedBlocklet>() {

        private Iterator<ExtendedBlocklet> blockletIterator;

        private ExtendedBlocklet currBlocklet;

        @Override
        public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
            DataMapDistributableWrapper distributable = (DataMapDistributableWrapper) inputSplit;
            TableDataMap dataMap = DataMapStoreManager.getInstance().getDataMap(table, distributable.getDistributable().getDataMapSchema());
            List<ExtendedBlocklet> blocklets = dataMap.prune(distributable.getDistributable(), dataMapExprWrapper.getFilterResolverIntf(distributable.getUniqueId()), partitions);
            for (ExtendedBlocklet blocklet : blocklets) {
                blocklet.setDataMapUniqueId(distributable.getUniqueId());
            }
            blockletIterator = blocklets.iterator();
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            boolean hasNext = blockletIterator.hasNext();
            if (hasNext) {
                currBlocklet = blockletIterator.next();
            }
            return hasNext;
        }

        @Override
        public Void getCurrentKey() throws IOException, InterruptedException {
            return null;
        }

        @Override
        public ExtendedBlocklet getCurrentValue() throws IOException, InterruptedException {
            return currBlocklet;
        }

        @Override
        public float getProgress() throws IOException, InterruptedException {
            return 0;
        }

        @Override
        public void close() throws IOException {
        }
    };
}
Also used : DataMapDistributableWrapper(org.apache.carbondata.core.datamap.dev.expr.DataMapDistributableWrapper) TableDataMap(org.apache.carbondata.core.datamap.TableDataMap) RecordReader(org.apache.hadoop.mapreduce.RecordReader) Iterator(java.util.Iterator) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) InputSplit(org.apache.hadoop.mapreduce.InputSplit) ExtendedBlocklet(org.apache.carbondata.core.indexstore.ExtendedBlocklet)
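
The CarbonData reader is essentially an iterator adapter: all the pruning happens in initialize(), after which nextKeyValue() and getCurrentValue() simply walk the resulting list, with a Void key because only the blocklets matter. The same shape can be written once as a small generic class. A minimal sketch (IteratorRecordReader is hypothetical, not part of CarbonData or Hadoop; here the iterator is supplied up front instead of being built in initialize()):

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class IteratorRecordReader<V> extends RecordReader<Void, V> {

    private final Iterator<V> iterator;

    private V current;

    public IteratorRecordReader(Iterator<V> iterator) {
        this.iterator = iterator;
    }

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) {
        // nothing to do: the iterator is already available
    }

    @Override
    public boolean nextKeyValue() {
        boolean hasNext = iterator.hasNext();
        if (hasNext) {
            current = iterator.next();
        }
        return hasNext;
    }

    @Override
    public Void getCurrentKey() {
        return null;
    }

    @Override
    public V getCurrentValue() {
        return current;
    }

    @Override
    public float getProgress() {
        return 0;
    }

    @Override
    public void close() throws IOException {
        // nothing to close for an in-memory iterator
    }
}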

Example 20 with RecordReader

use of org.apache.hadoop.mapreduce.RecordReader in project hbase by apache.

the class TableInputFormatBase method createRecordReader.

/**
 * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses
 * the default.
 *
 * @param split  The split to work with.
 * @param context  The current context.
 * @return The newly created record reader.
 * @throws IOException When creating the reader fails.
 * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader(
 *   org.apache.hadoop.mapreduce.InputSplit,
 *   org.apache.hadoop.mapreduce.TaskAttemptContext)
 */
@Override
public RecordReader<ImmutableBytesWritable, Result> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    // Just in case a subclass is relying on JobConfigurable magic.
    if (table == null) {
        initialize(context);
    }
    // null check in case our child overrides getTable to not throw.
    try {
        if (getTable() == null) {
            // initialize() must not have been implemented in the subclass.
            throw new IOException(INITIALIZATION_ERROR);
        }
    } catch (IllegalStateException exception) {
        throw new IOException(INITIALIZATION_ERROR, exception);
    }
    TableSplit tSplit = (TableSplit) split;
    LOG.info("Input split length: " + StringUtils.humanReadableInt(tSplit.getLength()) + " bytes.");
    final TableRecordReader trr = this.tableRecordReader != null ? this.tableRecordReader : new TableRecordReader();
    Scan sc = new Scan(this.scan);
    sc.withStartRow(tSplit.getStartRow());
    sc.withStopRow(tSplit.getEndRow());
    trr.setScan(sc);
    trr.setTable(getTable());
    return new RecordReader<ImmutableBytesWritable, Result>() {

        @Override
        public void close() throws IOException {
            trr.close();
            closeTable();
        }

        @Override
        public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException {
            return trr.getCurrentKey();
        }

        @Override
        public Result getCurrentValue() throws IOException, InterruptedException {
            return trr.getCurrentValue();
        }

        @Override
        public float getProgress() throws IOException, InterruptedException {
            return trr.getProgress();
        }

        @Override
        public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, InterruptedException {
            trr.initialize(inputsplit, context);
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            return trr.nextKeyValue();
        }
    };
}
Also used : RecordReader(org.apache.hadoop.mapreduce.RecordReader) Scan(org.apache.hadoop.hbase.client.Scan) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) IllegalArgumentIOException(org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException) IOException(java.io.IOException) InputSplit(org.apache.hadoop.mapreduce.InputSplit)
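
Here the anonymous class is a pure forwarding wrapper: every call delegates to the TableRecordReader, and the only added behaviour is closing the table together with the reader when the framework is done with it. That delegation pattern generalizes to any reader that owns an extra resource. A minimal sketch of a reusable version (ClosingRecordReader is hypothetical, not part of HBase or Hadoop):

import java.io.IOException;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class ClosingRecordReader<K, V> extends RecordReader<K, V> {

    private final RecordReader<K, V> delegate;

    private final AutoCloseable onClose;

    public ClosingRecordReader(RecordReader<K, V> delegate, AutoCloseable onClose) {
        this.delegate = delegate;
        this.onClose = onClose;
    }

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {
        delegate.initialize(split, context);
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        return delegate.nextKeyValue();
    }

    @Override
    public K getCurrentKey() throws IOException, InterruptedException {
        return delegate.getCurrentKey();
    }

    @Override
    public V getCurrentValue() throws IOException, InterruptedException {
        return delegate.getCurrentValue();
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        return delegate.getProgress();
    }

    @Override
    public void close() throws IOException {
        delegate.close();
        try {
            // run the extra cleanup action, e.g. closing the underlying table
            onClose.close();
        } catch (IOException e) {
            throw e;
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
}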

Aggregations

RecordReader (org.apache.hadoop.mapreduce.RecordReader): 24 usages
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 17 usages
Configuration (org.apache.hadoop.conf.Configuration): 13 usages
InputSplit (org.apache.hadoop.mapreduce.InputSplit): 13 usages
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 11 usages
TaskAttemptContextImpl (org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl): 11 usages
InputFormat (org.apache.hadoop.mapreduce.InputFormat): 9 usages
Job (org.apache.hadoop.mapreduce.Job): 8 usages
Test (org.junit.Test): 8 usages
ArrayList (java.util.ArrayList): 7 usages
Path (org.apache.hadoop.fs.Path): 7 usages
FileSplit (org.apache.hadoop.mapreduce.lib.input.FileSplit): 6 usages
IOException (java.io.IOException): 4 usages
File (java.io.File): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
Mapper (org.apache.hadoop.mapreduce.Mapper): 3 usages
WrappedMapper (org.apache.hadoop.mapreduce.lib.map.WrappedMapper): 3 usages
Scan (org.apache.hadoop.hbase.client.Scan): 2 usages
RecordWriter (org.apache.hadoop.mapreduce.RecordWriter): 2 usages
JobContextImpl (org.apache.hadoop.mapreduce.task.JobContextImpl): 2 usages