Usage of org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator in the Apache Hive project.
The configure method of the MergeFileMapper class.
/**
 * Configures this mapper from the job: extracts the merge-file map work,
 * pulls its single top operator (which must be an AbstractFileMergeOperator),
 * and initializes it. Sets {@code abort} on any failure before throwing.
 *
 * @param job the Hadoop job configuration carrying the serialized MapWork
 */
@Override
public void configure(JobConf job) {
  jc = job;
  MapWork mapWork = Utilities.getMapWork(job);
  try {
    // Guard: this mapper only handles merge-file plans.
    if (!(mapWork instanceof MergeFileWork)) {
      abort = true;
      throw new RuntimeException("Map work should be a merge file work.");
    }
    MergeFileWork mergeWork = (MergeFileWork) mapWork;
    // A merge plan has exactly one alias; take the first (and only) entry.
    String topAlias = mergeWork.getAliasToWork().keySet().iterator().next();
    op = mergeWork.getAliasToWork().get(topAlias);
    // Guard: the plan's root operator must be a file-merge operator.
    if (!(op instanceof AbstractFileMergeOperator)) {
      abort = true;
      throw new RuntimeException("Merge file work's top operator should be an instance of AbstractFileMergeOperator");
    }
    mergeOp = (AbstractFileMergeOperator) op;
    mergeOp.initializeOp(jc);
    row = new Object[2];
    abort = false;
  } catch (HiveException e) {
    // Initialization of the merge operator failed; mark aborted and rethrow unchecked.
    abort = true;
    throw new RuntimeException(e);
  }
}
Usage of org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator in the Apache Hive project.
The init method of the SparkMergeFileRecordHandler class.
/**
 * Initializes this record handler for a Spark merge-file task: delegates to
 * the superclass, extracts the merge-file map work from the job, validates
 * that its single top operator is an AbstractFileMergeOperator, and
 * initializes that operator. Sets {@code abort} on any failure before throwing.
 *
 * @param job      the Hadoop job configuration carrying the serialized MapWork
 * @param output   the output collector passed through to the superclass
 * @param reporter the progress reporter passed through to the superclass
 * @throws Exception if superclass init fails, the plan is not a merge-file
 *                   work, its root operator has the wrong type, or operator
 *                   initialization raises a HiveException
 */
@SuppressWarnings("unchecked")
@Override
public <K, V> void init(JobConf job, OutputCollector<K, V> output, Reporter reporter) throws Exception {
  super.init(job, output, reporter);
  try {
    jc = job;
    MapWork mapWork = Utilities.getMapWork(job);
    // Guard: this handler only supports merge-file plans.
    if (!(mapWork instanceof MergeFileWork)) {
      abort = true;
      throw new IllegalStateException("Map work should be a merge file work.");
    }
    MergeFileWork mergeWork = (MergeFileWork) mapWork;
    // A merge plan has exactly one alias; take the first (and only) entry.
    String topAlias = mergeWork.getAliasToWork().keySet().iterator().next();
    op = mergeWork.getAliasToWork().get(topAlias);
    // Guard: the plan's root operator must be a file-merge operator.
    if (!(op instanceof AbstractFileMergeOperator)) {
      abort = true;
      throw new IllegalStateException("Merge file work's top operator should be an instance of AbstractFileMergeOperator");
    }
    mergeOp = (AbstractFileMergeOperator<? extends FileMergeDesc>) op;
    mergeOp.initializeOp(jc);
    row = new Object[2];
    abort = false;
    // Only reached on the success path; dump the initialized operator tree.
    LOG.info(mergeOp.dump(0));
  } catch (HiveException e) {
    // Initialization of the merge operator failed; mark aborted and rethrow unchecked.
    abort = true;
    throw new RuntimeException(e);
  }
}
Aggregations