Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class DDLTask, method preInsertWork.
private int preInsertWork(Hive db, PreInsertTableDesc preInsertTableDesc) throws HiveException {
  try {
    // Only storage handlers whose metastore hook extends DefaultHiveMetaHook
    // take part in the pre-insert callback; anything else is skipped.
    HiveMetaHook hook = preInsertTableDesc.getTable().getStorageHandler().getMetaHook();
    if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
      return 0;
    }
    DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
    hiveMetaHook.preInsertTable(preInsertTableDesc.getTable().getTTable(), preInsertTableDesc.isOverwrite());
  } catch (MetaException e) {
    // Normalize the metastore-level failure to the query layer's exception type.
    throw new HiveException(e);
  }
  return 0;
}
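As a hedged illustration of the other side of this callback, the sketch below shows a storage handler hook that preInsertWork would dispatch to. The class name AuditingMetaHook and its body are hypothetical, not Hive source, and assume the DefaultHiveMetaHook contract in this Hive version (preInsertTable/commitInsertTable/rollbackInsertTable on top of HiveMetaHook's create/drop callbacks).

import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

// Hypothetical sketch: a metastore hook that opts in to the pre-insert
// callback by extending DefaultHiveMetaHook rather than implementing
// HiveMetaHook alone.
public class AuditingMetaHook extends DefaultHiveMetaHook {

  @Override
  public void preInsertTable(Table table, boolean overwrite) throws MetaException {
    // Runs before any rows are written; a MetaException thrown here
    // surfaces as a HiveException via preInsertWork above.
    if (table.getSd() == null) {
      throw new MetaException("Table " + table.getTableName() + " has no storage descriptor");
    }
  }

  @Override
  public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
    // Insert succeeded; nothing to finalize in this sketch.
  }

  @Override
  public void rollbackInsertTable(Table table, boolean overwrite) throws MetaException {
    // Insert failed; nothing to undo in this sketch.
  }

  // HiveMetaHook's table create/drop callbacks, no-ops in this sketch.
  @Override
  public void preCreateTable(Table table) throws MetaException {
  }

  @Override
  public void rollbackCreateTable(Table table) throws MetaException {
  }

  @Override
  public void commitCreateTable(Table table) throws MetaException {
  }

  @Override
  public void preDropTable(Table table) throws MetaException {
  }

  @Override
  public void rollbackDropTable(Table table) throws MetaException {
  }

  @Override
  public void commitDropTable(Table table, boolean deleteData) throws MetaException {
  }
}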
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class CommonMergeJoinOperator, method joinOneGroup.
private List<Byte> joinOneGroup(boolean clear) throws HiveException {
  // Locate the input(s) currently holding the smallest join key.
  int[] smallestPos = findSmallestKey();
  List<Byte> listOfNeedFetchNext = null;
  if (smallestPos != null) {
    listOfNeedFetchNext = joinObject(smallestPos, clear);
    if ((listOfNeedFetchNext.size() > 0) && clear) {
      // A new group starts: advance each consumed input to its next group.
      for (Byte b : listOfNeedFetchNext) {
        try {
          fetchNextGroup(b);
        } catch (Exception e) {
          throw new HiveException(e);
        }
      }
    }
  }
  return listOfNeedFetchNext;
}
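For context, joinObject returns the aliases whose current group was fully consumed, and any failure while fetching their next group is normalized to HiveException. The helper below is a hedged sketch of the k-way selection that findSmallestKey performs; names and types are illustrative, not the actual Hive implementation, which compares the operator's per-alias key writables.

import java.util.ArrayList;
import java.util.List;

// Illustrative only: the role findSmallestKey() plays above, reduced to
// plain Comparables. Returns the positions of all inputs currently holding
// the minimum key, or null when every input is exhausted. Ties matter,
// because each input sharing the smallest key joins the same group.
final class SmallestKey {
  static int[] positions(List<Comparable<Object>> currentKeys) {
    Comparable<Object> min = null;
    List<Integer> hits = new ArrayList<>();
    for (int i = 0; i < currentKeys.size(); i++) {
      Comparable<Object> key = currentKeys.get(i);
      if (key == null) {
        continue; // this input has no more groups
      }
      if (min == null || key.compareTo(min) < 0) {
        min = key;       // strictly smaller key: restart the candidate list
        hits.clear();
        hits.add(i);
      } else if (key.compareTo(min) == 0) {
        hits.add(i);     // tie: this input participates in the same group
      }
    }
    if (hits.isEmpty()) {
      return null;
    }
    int[] result = new int[hits.size()];
    for (int i = 0; i < hits.size(); i++) {
      result[i] = hits.get(i);
    }
    return result;
  }
}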
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class SkewJoinHandler, method close.
public void close(boolean abort) throws HiveException {
  if (!abort) {
    try {
      endGroup();
      commit();
    } catch (IOException e) {
      throw new HiveException(e);
    }
  } else {
    // On abort, remove any skew-key spill directories this handler created.
    for (int bigKeyTbl = 0; bigKeyTbl < numAliases; bigKeyTbl++) {
      // Skip aliases that never recorded a big (skewed) key.
      if (!bigKeysExistingMap.get((byte) bigKeyTbl)) {
        continue;
      }
      try {
        Path specPath = conf.getBigKeysDirMap().get((byte) bigKeyTbl);
        Path bigKeyPath = getOperatorOutputPath(specPath);
        FileSystem fs = bigKeyPath.getFileSystem(hconf);
        delete(bigKeyPath, fs);
        // Also remove the matching small-key directory of every other alias.
        for (int smallKeyTbl = 0; smallKeyTbl < numAliases; smallKeyTbl++) {
          if (((byte) smallKeyTbl) == bigKeyTbl) {
            continue;
          }
          specPath = conf.getSmallKeysDirMap().get((byte) bigKeyTbl).get((byte) smallKeyTbl);
          delete(getOperatorOutputPath(specPath), fs);
        }
      } catch (IOException e) {
        throw new HiveException(e);
      }
    }
  }
}
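The delete helper is private to SkewJoinHandler. As a hedged sketch of what such abort-time cleanup typically amounts to (assumed, not copied from Hive source), it reduces to a recursive FileSystem delete whose result is deliberately ignored:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical stand-in for the private delete(Path, FileSystem) helper:
// recursive=true removes the whole spill directory tree, and the boolean
// result is ignored because a missing path is fine during abort cleanup.
static void delete(Path path, FileSystem fs) throws IOException {
  fs.delete(path, true);
}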
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class SparkHashTableSinkOperator, method closeOp.
@Override
public void closeOp(boolean abort) throws HiveException {
  try {
    MapJoinPersistableTableContainer[] mapJoinTables = htsOperator.mapJoinTables;
    byte tag = conf.getTag();
    // The bound check uses <= so that tag == mapJoinTables.length cannot
    // slip past the guard and trip the array access below.
    if (mapJoinTables == null || mapJoinTables.length <= tag || mapJoinTables[tag] == null) {
      LOG.debug("mapJoinTable is null");
    } else if (abort) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Aborting, skip dumping side-table for tag: " + tag);
      }
    } else {
      // Time the hash-table flush so it appears in the Spark perf log.
      String method = PerfLogger.SPARK_FLUSH_HASHTABLE + getName();
      perfLogger.PerfLogBegin(CLASS_NAME, method);
      try {
        flushToFile(mapJoinTables[tag], tag);
      } finally {
        perfLogger.PerfLogEnd(CLASS_NAME, method);
      }
    }
    super.closeOp(abort);
  } catch (HiveException e) {
    // Already the canonical type: rethrow without double-wrapping.
    throw e;
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
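The two catch clauses implement a rethrow-or-wrap idiom that recurs throughout these examples. Isolated as a generic helper below (callWrapped is a hypothetical name, not a Hive API), the shape is:

import java.util.concurrent.Callable;
import org.apache.hadoop.hive.ql.metadata.HiveException;

// HiveException passes through untouched so callers see the original
// failure, while any other exception is normalized to the single type
// the operator contract declares. Catch order matters: the specific
// HiveException clause must precede the general Exception clause.
static <T> T callWrapped(Callable<T> work) throws HiveException {
  try {
    return work.call();
  } catch (HiveException e) {
    throw e;                    // already canonical: avoid double-wrapping
  } catch (Exception e) {
    throw new HiveException(e); // wrap, preserving the cause chain
  }
}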
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class StatsNoJobTask, method execute.
@Override
public int execute(DriverContext driverContext) {
  LOG.info("Executing stats (no job) task");
  String tableName = "";
  ExecutorService threadPool = null;
  Hive db = getHive();
  try {
    tableName = work.getTableSpecs().tableName;
    table = db.getTable(tableName);
    int numThreads = HiveConf.getIntVar(conf, ConfVars.HIVE_STATS_GATHER_NUM_THREADS);
    tableFullName = table.getDbName() + "." + table.getTableName();
    // Daemon threads so a stalled stats computation cannot block JVM shutdown.
    threadPool = Executors.newFixedThreadPool(numThreads,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("StatsNoJobTask-Thread-%d").build());
    partUpdates = new MapMaker().concurrencyLevel(numThreads).makeMap();
    LOG.info("Initialized threadpool for stats computation with " + numThreads + " threads");
  } catch (HiveException e) {
    // Log and fall through: aggregateStats is still invoked below.
    LOG.error("Cannot get table " + tableName, e);
    console.printError("Cannot get table " + tableName, e.toString());
  }
  return aggregateStats(threadPool, db);
}
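Two details in this snippet are easy to miss. The worker threads are created as daemons through Guava's ThreadFactoryBuilder, so a stalled stats computation cannot keep the JVM alive at shutdown. And the catch block only logs: execution falls through to aggregateStats even when the table lookup failed, with threadPool still null, so that method has to tolerate both.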