Use of org.apache.hadoop.hive.ql.Context in project hive by apache: class DDLTask, method unlockDatabase.
/**
 * Unlock the database specified.
 *
 * @param db
 *          the Hive object used to access the metastore
 * @param unlockDb
 *          the database to be unlocked
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int unlockDatabase(Hive db, UnlockDatabaseDesc unlockDb) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.unlockDatabase(db, unlockDb);
}
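The lock/unlock DDL methods in this listing differ only in which HiveTxnManager call they delegate to; the Context lookup itself is identical. Below is a minimal sketch of that shared lookup, using a hypothetical helper class (TxnManagerLookup is invented for illustration and is not part of DDLTask or the Hive codebase):

import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;

// Hypothetical helper illustrating the shared pattern: resolve the query's
// Context from the DriverContext, then ask it for the transaction manager.
final class TxnManagerLookup {

  static HiveTxnManager fromDriverContext(DriverContext driverContext) {
    Context ctx = driverContext.getCtx();   // Context of the currently executing query
    return ctx.getHiveTxnManager();         // transaction manager bound to that Context
  }
}

Each DDL method then only forwards the requested operation, for example txnManager.unlockDatabase(db, unlockDb) as shown above.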
Use of org.apache.hadoop.hive.ql.Context in project hive by apache: class DDLTask, method showLocks.
/**
 * Write a list of the current locks to a file.
 *
 * @param db
 *          the Hive object used to access the metastore
 * @param showLocks
 *          the locks we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  HiveLockManager lockMgr = txnManager.getLockManager();

  if (txnManager.useNewShowLocksFormat()) {
    return showLocksNewFormat(showLocks, lockMgr);
  }

  boolean isExt = showLocks.isExt();
  if (lockMgr == null) {
    throw new HiveException("show Locks LockManager not specified");
  }

  // write the results in the file
  DataOutputStream outStream = getOutputStream(showLocks.getResFile());
  try {
    List<HiveLock> locks = null;
    if (showLocks.getTableName() == null) {
      // TODO should be doing security check here. Users should not be
      // able to see each other's locks.
      locks = lockMgr.getLocks(false, isExt);
    } else {
      locks = lockMgr.getLocks(
          HiveLockObject.createFrom(db, showLocks.getTableName(), showLocks.getPartSpec()),
          true, isExt);
    }
    Collections.sort(locks, new Comparator<HiveLock>() {

      @Override
      public int compare(HiveLock o1, HiveLock o2) {
        int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
        if (cmp == 0) {
          if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
            return cmp;
          }
          // EXCLUSIVE locks occur before SHARED locks
          if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
            return -1;
          }
          return +1;
        }
        return cmp;
      }
    });

    Iterator<HiveLock> locksIter = locks.iterator();
    while (locksIter.hasNext()) {
      HiveLock lock = locksIter.next();
      outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
      outStream.write(separator);
      outStream.writeBytes(lock.getHiveLockMode().toString());
      if (isExt) {
        HiveLockObjectData lockData = lock.getHiveLockObject().getData();
        if (lockData != null) {
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr());
        }
      }
      outStream.write(terminator);
    }
  } catch (FileNotFoundException e) {
    LOG.warn("show locks: " + stringifyException(e));
    return 1;
  } catch (IOException e) {
    LOG.warn("show locks: " + stringifyException(e));
    return 1;
  } catch (Exception e) {
    throw new HiveException(e.toString(), e);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
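The anonymous Comparator above orders locks by lock-object name and, for equal names, lists EXCLUSIVE locks ahead of SHARED ones. The same ordering can be expressed as a standalone comparator; the sketch below is hypothetical (the class name LockOrdering is invented) and assumes only the EXCLUSIVE/SHARED distinction matters, as the source comment states:

import java.util.Comparator;

import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;

// Hypothetical standalone ordering: compare by lock-object name, then place
// EXCLUSIVE locks before non-EXCLUSIVE ones when the names are equal.
final class LockOrdering {

  static final Comparator<HiveLock> BY_NAME_THEN_MODE =
      Comparator.comparing((HiveLock l) -> l.getHiveLockObject().getName())
          .thenComparing(l -> l.getHiveLockMode() == HiveLockMode.EXCLUSIVE ? 0 : 1);
}

With such a comparator, the sort in showLocks would read Collections.sort(locks, LockOrdering.BY_NAME_THEN_MODE).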
Use of org.apache.hadoop.hive.ql.Context in project hive by apache: class DDLTask, method lockTable.
/**
 * Lock the table/partition specified.
 *
 * @param db
 *          the Hive object used to access the metastore
 * @param lockTbl
 *          the table/partition to be locked along with the mode
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.lockTable(db, lockTbl);
}
Use of org.apache.hadoop.hive.ql.Context in project hive by apache: class DDLTask, method lockDatabase.
/**
 * Lock the database specified.
 *
 * @param db
 *          the Hive object used to access the metastore
 * @param lockDb
 *          the database to be locked along with the mode
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int lockDatabase(Hive db, LockDatabaseDesc lockDb) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.lockDatabase(db, lockDb);
}
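lockDatabase, unlockDatabase and lockTable all follow the same return-code convention documented above: 0 on success, a value above 0 on failure, with HiveException reserved for unexpected errors. A caller is therefore expected to branch on the result; a minimal, hypothetical caller-side sketch (names invented):

// Hypothetical caller sketch: treat any non-zero return code from the DDL
// lock helpers as a failure and surface it to the user.
int rc = lockDatabase(db, lockDb);
if (rc != 0) {
  throw new HiveException("LOCK DATABASE failed with return code " + rc);
}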
Use of org.apache.hadoop.hive.ql.Context in project hive by apache: class LocalHiveSparkClient, method execute.
@Override
public SparkJobRef execute(DriverContext driverContext, SparkWork sparkWork) throws Exception {
  Context ctx = driverContext.getCtx();
  HiveConf hiveConf = (HiveConf) ctx.getConf();
  refreshLocalResources(sparkWork, hiveConf);
  JobConf jobConf = new JobConf(hiveConf);

  // Create temporary scratch dir
  Path emptyScratchDir = ctx.getMRTmpPath();
  FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
  fs.mkdirs(emptyScratchDir);

  // Update credential provider location
  // the password to the credential provider is already set in the sparkConf
  // in HiveSparkClientFactory
  HiveConfUtil.updateJobCredentialProviders(jobConf);

  SparkCounters sparkCounters = new SparkCounters(sc);
  Map<String, List<String>> prefixes = sparkWork.getRequiredCounterPrefix();
  if (prefixes != null) {
    for (String group : prefixes.keySet()) {
      for (String counterName : prefixes.get(group)) {
        sparkCounters.createCounter(group, counterName);
      }
    }
  }
  SparkReporter sparkReporter = new SparkReporter(sparkCounters);

  // Generate Spark plan
  SparkPlanGenerator gen =
      new SparkPlanGenerator(sc, ctx, jobConf, emptyScratchDir, sparkReporter);
  SparkPlan plan = gen.generate(sparkWork);

  if (driverContext.isShutdown()) {
    throw new HiveException("Operation is cancelled.");
  }

  // Execute generated plan.
  JavaPairRDD<HiveKey, BytesWritable> finalRDD = plan.generateGraph();
  // We use Spark RDD async action to submit the job as it's the only way to get the jobId now.
  JavaFutureAction<Void> future = finalRDD.foreachAsync(HiveVoidFunction.getInstance());
  // As we always use a foreach action to submit the RDD graph, it would only trigger one job.
  int jobId = future.jobIds().get(0);

  LocalSparkJobStatus sparkJobStatus = new LocalSparkJobStatus(
      sc, jobId, jobMetricsListener, sparkCounters, plan.getCachedRDDIds(), future);
  return new LocalSparkJobRef(Integer.toString(jobId), hiveConf, sparkJobStatus, sc);
}
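The counter-registration loop in execute() iterates keySet() and then looks each group up again with get(). A minimal equivalent sketch using entrySet() avoids the second lookup; the helper name below is hypothetical, and it assumes only the SparkCounters.createCounter(group, name) call shown above:

import java.util.List;
import java.util.Map;

// Hypothetical helper mirroring the counter-registration step in execute():
// one createCounter call per (group, counterName) pair from the prefix map.
static void registerCounters(SparkCounters sparkCounters, Map<String, List<String>> prefixes) {
  if (prefixes == null) {
    return;
  }
  for (Map.Entry<String, List<String>> entry : prefixes.entrySet()) {
    for (String counterName : entry.getValue()) {
      sparkCounters.createCounter(entry.getKey(), counterName);
    }
  }
}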