
Example 11 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class DDLTask method unlockDatabase.

/**
   * Unlock the specified database.
   *
   * @param db
   *          handle to the Hive metastore
   * @param unlockDb
   *          the database to be unlocked
   * @return 0 if execution succeeds, a value greater than 0 otherwise.
   * @throws HiveException
   *           if an unexpected error occurs.
   */
private int unlockDatabase(Hive db, UnlockDatabaseDesc unlockDb) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    return txnManager.unlockDatabase(db, unlockDb);
}
Also used:
- org.apache.hadoop.hive.metastore.api.EnvironmentContext
- org.apache.hadoop.hive.ql.Context
- org.apache.hadoop.hive.ql.CompilationOpContext
- org.apache.hadoop.hive.ql.DriverContext
- org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager
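
For orientation, a minimal sketch of how this method (and lockDatabase in Example 14 below) is typically reached. This is not taken from the example's source: the class name, database name, and configuration are illustrative, and it assumes an embedded session with concurrency support enabled.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class DatabaseLockSketch {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // Lock support requires concurrency to be enabled and a lock manager
        // to be configured (illustrative assumption for this sketch)
        conf.setBoolean("hive.support.concurrency", true);
        SessionState.start(conf);
        Driver driver = new Driver(conf);
        // Each statement compiles to a DDLTask; its LockDatabaseDesc /
        // UnlockDatabaseDesc is passed to the methods shown in these examples,
        // which delegate to the session's HiveTxnManager
        driver.run("LOCK DATABASE demo_db SHARED");
        driver.run("UNLOCK DATABASE demo_db");
    }
}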

Example 12 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class DDLTask method showLocks.

/**
   * Write a list of the current locks to a file.
   *
   * @param db
   *          handle to the Hive metastore, used to resolve the queried
   *          table/partition
   * @param showLocks
   *          descriptor of the locks we're interested in
   * @return 0 if execution succeeds, a value greater than 0 otherwise.
   * @throws HiveException
   *           if an unexpected error occurs.
   */
private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    HiveLockManager lockMgr = txnManager.getLockManager();
    if (txnManager.useNewShowLocksFormat()) {
        return showLocksNewFormat(showLocks, lockMgr);
    }
    boolean isExt = showLocks.isExt();
    if (lockMgr == null) {
        throw new HiveException("show Locks LockManager not specified");
    }
    // write the results in the file
    DataOutputStream outStream = getOutputStream(showLocks.getResFile());
    try {
        List<HiveLock> locks = null;
        if (showLocks.getTableName() == null) {
            // TODO: perform a security check here. Users should not be
            // able to see each other's locks.
            locks = lockMgr.getLocks(false, isExt);
        } else {
            locks = lockMgr.getLocks(HiveLockObject.createFrom(db, showLocks.getTableName(), showLocks.getPartSpec()), true, isExt);
        }
        Collections.sort(locks, new Comparator<HiveLock>() {

            @Override
            public int compare(HiveLock o1, HiveLock o2) {
                int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
                if (cmp == 0) {
                    if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
                        return cmp;
                    }
                    // EXCLUSIVE locks occur before SHARED locks
                    if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
                        return -1;
                    }
                    return +1;
                }
                return cmp;
            }
        });
        for (HiveLock lock : locks) {
            outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
            outStream.write(separator);
            outStream.writeBytes(lock.getHiveLockMode().toString());
            if (isExt) {
                HiveLockObjectData lockData = lock.getHiveLockObject().getData();
                if (lockData != null) {
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr());
                }
            }
            outStream.write(terminator);
        }
    } catch (IOException e) {
        // FileNotFoundException is a subclass of IOException, so this single
        // catch also covers a missing result file
        LOG.warn("show locks: " + stringifyException(e));
        return 1;
    } catch (Exception e) {
        throw new HiveException(e.toString(), e);
    } finally {
        IOUtils.closeStream(outStream);
    }
    return 0;
}
Also used:
- org.apache.hadoop.hive.metastore.api.EnvironmentContext
- org.apache.hadoop.hive.ql.Context
- org.apache.hadoop.hive.ql.CompilationOpContext
- org.apache.hadoop.hive.ql.DriverContext
- org.apache.hadoop.hive.ql.metadata.HiveException
- java.io.DataOutputStream
- org.apache.hadoop.fs.FSDataOutputStream
- java.io.FileNotFoundException
- org.apache.hadoop.hive.ql.lockmgr.HiveLock
- java.io.IOException
- org.apache.hadoop.hive.ql.lockmgr.HiveLockManager
- org.apache.hadoop.hive.metastore.api.AlreadyExistsException
- org.apache.hadoop.hive.metastore.api.InvalidOperationException
- org.apache.hadoop.hive.metastore.api.NoSuchObjectException
- org.apache.hadoop.hive.metastore.api.MetaException
- java.net.URISyntaxException
- org.apache.hadoop.hive.ql.parse.SemanticException
- org.apache.hadoop.util.StringUtils.stringifyException
- java.sql.SQLException
- org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException
- org.apache.hadoop.hive.ql.metadata.InvalidTableException
- org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData
- org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager
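
A standalone sketch of the sort order the comparator above enforces: locks sort by object name, and on name ties EXCLUSIVE comes before SHARED. Lock and Mode are simplified stand-ins for HiveLock and HiveLockMode; all names and values here are illustrative.

import java.util.Arrays;

public class LockOrderingSketch {
    enum Mode { EXCLUSIVE, SHARED }

    static class Lock {
        final String name;
        final Mode mode;
        Lock(String name, Mode mode) { this.name = name; this.mode = mode; }
    }

    public static void main(String[] args) {
        Lock[] locks = {
            new Lock("default@t1", Mode.SHARED),
            new Lock("default@t1", Mode.EXCLUSIVE),
            new Lock("default@a1", Mode.SHARED)
        };
        // Same contract as the anonymous Comparator above: compare names first,
        // then put EXCLUSIVE before SHARED on ties
        Arrays.sort(locks, (o1, o2) -> {
            int cmp = o1.name.compareTo(o2.name);
            if (cmp != 0) {
                return cmp;
            }
            if (o1.mode == o2.mode) {
                return 0;
            }
            return o1.mode == Mode.EXCLUSIVE ? -1 : 1;
        });
        // Prints default@a1 SHARED, default@t1 EXCLUSIVE, default@t1 SHARED
        for (Lock l : locks) {
            System.out.println(l.name + " " + l.mode);
        }
    }
}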

Example 13 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class DDLTask method lockTable.

/**
   * Lock the specified table or partition.
   *
   * @param db
   *          handle to the Hive metastore
   * @param lockTbl
   *          the table/partition to be locked, along with the lock mode
   * @return 0 if execution succeeds, a value greater than 0 otherwise.
   * @throws HiveException
   *           if an unexpected error occurs.
   */
private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    return txnManager.lockTable(db, lockTbl);
}
Also used:
- org.apache.hadoop.hive.metastore.api.EnvironmentContext
- org.apache.hadoop.hive.ql.Context
- org.apache.hadoop.hive.ql.CompilationOpContext
- org.apache.hadoop.hive.ql.DriverContext
- org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager
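
As with the database sketch after Example 11, a hedged illustration of the statements that route to this method. The table name, partition spec, and configuration are illustrative only.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class TableLockSketch {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // Illustrative assumption: concurrency support enabled
        conf.setBoolean("hive.support.concurrency", true);
        SessionState.start(conf);
        Driver driver = new Driver(conf);
        // Compiles to a DDLTask carrying a LockTableDesc with the table name,
        // the optional partition spec, and the requested lock mode
        driver.run("LOCK TABLE events PARTITION (ds='2017-01-01') SHARED");
        driver.run("UNLOCK TABLE events PARTITION (ds='2017-01-01')");
    }
}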

Example 14 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class DDLTask method lockDatabase.

/**
   * Lock the specified database.
   *
   * @param db
   *          handle to the Hive metastore
   * @param lockDb
   *          the database to be locked, along with the lock mode
   * @return 0 if execution succeeds, a value greater than 0 otherwise.
   * @throws HiveException
   *           if an unexpected error occurs.
   */
private int lockDatabase(Hive db, LockDatabaseDesc lockDb) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    return txnManager.lockDatabase(db, lockDb);
}
Also used:
- org.apache.hadoop.hive.metastore.api.EnvironmentContext
- org.apache.hadoop.hive.ql.Context
- org.apache.hadoop.hive.ql.CompilationOpContext
- org.apache.hadoop.hive.ql.DriverContext
- org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager

Example 15 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class LocalHiveSparkClient method execute.

@Override
public SparkJobRef execute(DriverContext driverContext, SparkWork sparkWork) throws Exception {
    Context ctx = driverContext.getCtx();
    HiveConf hiveConf = (HiveConf) ctx.getConf();
    refreshLocalResources(sparkWork, hiveConf);
    JobConf jobConf = new JobConf(hiveConf);
    // Create a temporary scratch dir
    Path emptyScratchDir = ctx.getMRTmpPath();
    FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
    fs.mkdirs(emptyScratchDir);
    // Update the credential provider location; the password for the credential
    // provider is already set in the sparkConf in HiveSparkClientFactory
    HiveConfUtil.updateJobCredentialProviders(jobConf);
    SparkCounters sparkCounters = new SparkCounters(sc);
    Map<String, List<String>> prefixes = sparkWork.getRequiredCounterPrefix();
    if (prefixes != null) {
        for (Map.Entry<String, List<String>> group : prefixes.entrySet()) {
            for (String counterName : group.getValue()) {
                sparkCounters.createCounter(group.getKey(), counterName);
            }
        }
    }
    SparkReporter sparkReporter = new SparkReporter(sparkCounters);
    // Generate Spark plan
    SparkPlanGenerator gen = new SparkPlanGenerator(sc, ctx, jobConf, emptyScratchDir, sparkReporter);
    SparkPlan plan = gen.generate(sparkWork);
    if (driverContext.isShutdown()) {
        throw new HiveException("Operation is cancelled.");
    }
    // Execute generated plan.
    JavaPairRDD<HiveKey, BytesWritable> finalRDD = plan.generateGraph();
    // We use Spark's asynchronous RDD action to submit the job, as it is
    // currently the only way to obtain the job id.
    JavaFutureAction<Void> future = finalRDD.foreachAsync(HiveVoidFunction.getInstance());
    // Since the RDD graph is always submitted through a foreach action, it
    // triggers exactly one job.
    int jobId = future.jobIds().get(0);
    LocalSparkJobStatus sparkJobStatus = new LocalSparkJobStatus(sc, jobId, jobMetricsListener, sparkCounters, plan.getCachedRDDIds(), future);
    return new LocalSparkJobRef(Integer.toString(jobId), hiveConf, sparkJobStatus, sc);
}
Also used:
- org.apache.spark.api.java.JavaSparkContext
- org.apache.hadoop.hive.ql.Context
- org.apache.hadoop.hive.ql.DriverContext
- org.apache.hadoop.fs.Path
- org.apache.hive.spark.counter.SparkCounters
- org.apache.hadoop.hive.ql.metadata.HiveException
- org.apache.hadoop.io.BytesWritable
- org.apache.hadoop.hive.ql.exec.spark.status.impl.LocalSparkJobStatus
- org.apache.hadoop.hive.ql.io.HiveKey
- org.apache.hadoop.fs.FileSystem
- org.apache.hadoop.hive.conf.HiveConf
- java.util.ArrayList
- java.util.List
- org.apache.hadoop.hive.ql.exec.spark.status.impl.LocalSparkJobRef
- org.apache.hadoop.mapred.JobConf
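
The notable trick here is the asynchronous RDD action: foreachAsync returns a JavaFutureAction, which is what exposes the job id before the job completes. A minimal standalone sketch of that pattern with a plain JavaSparkContext (the app name, master, and data are illustrative):

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaFutureAction;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class AsyncJobIdSketch {
    public static void main(String[] args) throws Exception {
        SparkConf conf = new SparkConf().setAppName("async-job-id").setMaster("local[*]");
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3));
            // A synchronous action like foreach() would block with no handle to
            // the job; the async variant hands back a future carrying job ids
            JavaFutureAction<Void> future = rdd.foreachAsync(x -> { });
            // A single foreach action triggers exactly one job
            int jobId = future.jobIds().get(0);
            System.out.println("Submitted Spark job " + jobId);
            // Wait for completion before tearing the context down
            future.get();
        }
    }
}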

Aggregations

- Context (org.apache.hadoop.hive.ql.Context): 47
- Path (org.apache.hadoop.fs.Path): 19
- IOException (java.io.IOException): 15
- DriverContext (org.apache.hadoop.hive.ql.DriverContext): 15
- CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext): 13
- FileSystem (org.apache.hadoop.fs.FileSystem): 9
- HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 9
- Serializable (java.io.Serializable): 8
- Task (org.apache.hadoop.hive.ql.exec.Task): 7
- HiveConf (org.apache.hadoop.hive.conf.HiveConf): 5
- Table (org.apache.hadoop.hive.ql.metadata.Table): 5
- ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext): 5
- TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc): 5
- ArrayList (java.util.ArrayList): 4
- EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext): 4
- TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 4
- HiveTxnManager (org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager): 4
- MapWork (org.apache.hadoop.hive.ql.plan.MapWork): 4
- JobClient (org.apache.hadoop.mapred.JobClient): 4
- JobConf (org.apache.hadoop.mapred.JobConf): 4