
Example 1 with HCatInputFormat

Use of org.apache.hive.hcatalog.mapreduce.HCatInputFormat in the Apache Hive project.

From the class HCatInputFormatReader, method prepareRead:

@Override
public ReaderContext prepareRead() throws HCatException {
    try {
        Job job = new Job(conf);
        HCatInputFormat hcif = HCatInputFormat.setInput(job, re.getDbName(), re.getTableName(), re.getFilterString());
        ReaderContextImpl cntxt = new ReaderContextImpl();
        cntxt.setInputSplits(hcif.getSplits(ShimLoader.getHadoopShims().getHCatShim().createJobContext(job.getConfiguration(), null)));
        cntxt.setConf(job.getConfiguration());
        return cntxt;
    } catch (IOException e) {
        throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
        throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
}
Also used: HCatException (org.apache.hive.hcatalog.common.HCatException), IOException (java.io.IOException), Job (org.apache.hadoop.mapreduce.Job), HCatInputFormat (org.apache.hive.hcatalog.mapreduce.HCatInputFormat)
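
For context, the snippet below is a minimal master-side sketch of how prepareRead() is typically reached through the public HCatalog data-transfer API (DataTransferFactory and ReadEntity), which delegates to HCatInputFormatReader under the hood. The class name MasterReadSketch and the database and table names are placeholders.

import java.util.HashMap;
import java.util.Map;

import org.apache.hive.hcatalog.data.transfer.DataTransferFactory;
import org.apache.hive.hcatalog.data.transfer.HCatReader;
import org.apache.hive.hcatalog.data.transfer.ReadEntity;
import org.apache.hive.hcatalog.data.transfer.ReaderContext;

public class MasterReadSketch {
    public static ReaderContext prepare() throws Exception {
        // Describe what to read; "default" and "mytable" are placeholder names.
        ReadEntity entity = new ReadEntity.Builder()
            .withDatabase("default")
            .withTable("mytable")
            .build();
        Map<String, String> config = new HashMap<String, String>();
        // The factory returns an HCatReader backed by HCatInputFormatReader.
        HCatReader reader = DataTransferFactory.getHCatReader(entity, config);
        // prepareRead() is the method shown above: it computes the input splits
        // and packages them, together with the job configuration, into a
        // ReaderContext that can be serialized and shipped to slave processes.
        return reader.prepareRead();
    }
}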

Example 2 with HCatInputFormat

Use of org.apache.hive.hcatalog.mapreduce.HCatInputFormat in the Apache Hive project.

From the class HCatInputFormatReader, method read:

@Override
public Iterator<HCatRecord> read() throws HCatException {
    HCatInputFormat inpFmt = new HCatInputFormat();
    RecordReader<WritableComparable, HCatRecord> rr;
    try {
        TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf, new TaskAttemptID());
        rr = inpFmt.createRecordReader(split, cntxt);
        rr.initialize(split, cntxt);
    } catch (IOException e) {
        throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
        throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
    return new HCatRecordItr(rr);
}
Also used: WritableComparable (org.apache.hadoop.io.WritableComparable), TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID), HCatException (org.apache.hive.hcatalog.common.HCatException), TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext), IOException (java.io.IOException), HCatInputFormat (org.apache.hive.hcatalog.mapreduce.HCatInputFormat), HCatRecord (org.apache.hive.hcatalog.data.HCatRecord)
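
On the slave side, read() is driven from the ReaderContext produced by prepareRead() on the master. The sketch below assumes the (ReaderContext, slaveNumber) overload of DataTransferFactory.getHCatReader that accompanies the ReaderContextImpl shown in Example 1; the class name SlaveReadSketch and method name readSplit are illustrative.

import java.util.Iterator;

import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.data.transfer.DataTransferFactory;
import org.apache.hive.hcatalog.data.transfer.HCatReader;
import org.apache.hive.hcatalog.data.transfer.ReaderContext;

public class SlaveReadSketch {
    // cntxt is the ReaderContext built by prepareRead() on the master;
    // slaveNumber selects which of its splits this process should read.
    public static void readSplit(ReaderContext cntxt, int slaveNumber) throws Exception {
        HCatReader reader = DataTransferFactory.getHCatReader(cntxt, slaveNumber);
        // read() is the method shown above: it creates the HCatInputFormat
        // record reader for the assigned split and wraps it in an iterator.
        Iterator<HCatRecord> itr = reader.read();
        while (itr.hasNext()) {
            HCatRecord record = itr.next();
            // process the record ...
        }
    }
}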

Example 3 with HCatInputFormat

Use of org.apache.hive.hcatalog.mapreduce.HCatInputFormat in the Apache Hive project.

From the class TestE2EScenarios, method copyTable:

private void copyTable(String in, String out) throws IOException, InterruptedException {
    Job ijob = new Job();
    Job ojob = new Job();
    HCatInputFormat inpy = new HCatInputFormat();
    inpy.setInput(ijob, null, in);
    HCatOutputFormat oupy = new HCatOutputFormat();
    oupy.setOutput(ojob, OutputJobInfo.create(null, out, new HashMap<String, String>()));
    // Test HCatContext
    System.err.println("HCatContext INSTANCE is present : " + HCatContext.INSTANCE.getConf().isPresent());
    if (HCatContext.INSTANCE.getConf().isPresent()) {
        System.err.println("HCatContext tinyint->int promotion says " + HCatContext.INSTANCE.getConf().get().getBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT));
    }
    HCatSchema tableSchema = inpy.getTableSchema(ijob.getConfiguration());
    System.err.println("Copying from [" + in + "] to [" + out + "] with schema : " + tableSchema.toString());
    oupy.setSchema(ojob, tableSchema);
    oupy.checkOutputSpecs(ojob);
    OutputCommitter oc = oupy.getOutputCommitter(createTaskAttemptContext(ojob.getConfiguration()));
    oc.setupJob(ojob);
    for (InputSplit split : inpy.getSplits(ijob)) {
        TaskAttemptContext rtaskContext = createTaskAttemptContext(ijob.getConfiguration());
        TaskAttemptContext wtaskContext = createTaskAttemptContext(ojob.getConfiguration());
        RecordReader<WritableComparable, HCatRecord> rr = inpy.createRecordReader(split, rtaskContext);
        rr.initialize(split, rtaskContext);
        OutputCommitter taskOc = oupy.getOutputCommitter(wtaskContext);
        taskOc.setupTask(wtaskContext);
        RecordWriter<WritableComparable<?>, HCatRecord> rw = oupy.getRecordWriter(wtaskContext);
        while (rr.nextKeyValue()) {
            rw.write(rr.getCurrentKey(), rr.getCurrentValue());
        }
        rw.close(wtaskContext);
        taskOc.commitTask(wtaskContext);
        rr.close();
    }
    oc.commitJob(ojob);
}
Also used: OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), HCatSchema (org.apache.hive.hcatalog.data.schema.HCatSchema), HashMap (java.util.HashMap), WritableComparable (org.apache.hadoop.io.WritableComparable), HCatOutputFormat (org.apache.hive.hcatalog.mapreduce.HCatOutputFormat), TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext), Job (org.apache.hadoop.mapreduce.Job), InputSplit (org.apache.hadoop.mapreduce.InputSplit), HCatInputFormat (org.apache.hive.hcatalog.mapreduce.HCatInputFormat), HCatRecord (org.apache.hive.hcatalog.data.HCatRecord)
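
copyTable drives HCatInputFormat and HCatOutputFormat by hand, split by split. The sketch below expresses the same copy as an ordinary MapReduce job, which is the more common way to use these classes. The driver class HCatCopyDriver, the identity CopyMapper, and the command-line arguments are illustrative; the setInput, getTableSchema, setOutput, and setSchema calls mirror the ones used in copyTable above.

import java.util.HashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.apache.hive.hcatalog.mapreduce.HCatOutputFormat;
import org.apache.hive.hcatalog.mapreduce.OutputJobInfo;

public class HCatCopyDriver {

    // Identity mapper: each input record is forwarded unchanged to the output table.
    public static class CopyMapper
            extends Mapper<WritableComparable, HCatRecord, WritableComparable<?>, HCatRecord> {
        @Override
        protected void map(WritableComparable key, HCatRecord value, Context context)
                throws java.io.IOException, InterruptedException {
            // HCatOutputFormat ignores the key, so null is fine here.
            context.write(null, value);
        }
    }

    public static void main(String[] args) throws Exception {
        // Placeholder table names in the default database.
        String inTable = args[0];
        String outTable = args[1];

        Job job = Job.getInstance(new Configuration(), "hcat-copy-sketch");
        job.setJarByClass(HCatCopyDriver.class);

        // Same calls copyTable makes by hand, wired into a normal MR job.
        HCatInputFormat hcatIn = new HCatInputFormat();
        hcatIn.setInput(job, null, inTable);
        job.setInputFormatClass(HCatInputFormat.class);

        HCatOutputFormat hcatOut = new HCatOutputFormat();
        hcatOut.setOutput(job, OutputJobInfo.create(null, outTable, new HashMap<String, String>()));
        HCatSchema schema = hcatIn.getTableSchema(job.getConfiguration());
        hcatOut.setSchema(job, schema);
        job.setOutputFormatClass(HCatOutputFormat.class);

        job.setMapperClass(CopyMapper.class);
        job.setOutputKeyClass(WritableComparable.class);
        job.setOutputValueClass(HCatRecord.class);
        // Map-only job: records go straight from the input table to the output table.
        job.setNumReduceTasks(0);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}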

Aggregations

HCatInputFormat (org.apache.hive.hcatalog.mapreduce.HCatInputFormat): 3 uses
IOException (java.io.IOException): 2 uses
WritableComparable (org.apache.hadoop.io.WritableComparable): 2 uses
Job (org.apache.hadoop.mapreduce.Job): 2 uses
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 2 uses
HCatException (org.apache.hive.hcatalog.common.HCatException): 2 uses
HCatRecord (org.apache.hive.hcatalog.data.HCatRecord): 2 uses
HashMap (java.util.HashMap): 1 use
InputSplit (org.apache.hadoop.mapreduce.InputSplit): 1 use
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 1 use
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 1 use
HCatSchema (org.apache.hive.hcatalog.data.schema.HCatSchema): 1 use
HCatOutputFormat (org.apache.hive.hcatalog.mapreduce.HCatOutputFormat): 1 use