Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.
In the class TestDbTxnManager, the method testDDLShared:
@Test
public void testDDLShared() throws Exception {
  WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_SHARED);
  QueryPlan qp = new MockQueryPlan(this);
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1, TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.getLockManager().unlock(locks.get(0));
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
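The test leans on the addTableOutput helper and MockQueryPlan to feed entities into the lock manager. A minimal sketch of what such a helper could look like, assuming the test class keeps a writeEntities collection that MockQueryPlan later exposes as the plan's outputs (the helper body here is an illustration, not necessarily the exact Hive implementation):

// Hypothetical sketch: wrap a table in a WriteEntity with the requested
// write type and record it so MockQueryPlan can report it as a plan output.
private WriteEntity addTableOutput(WriteEntity.WriteType writeType) throws Exception {
  WriteEntity we = new WriteEntity(newTable(false), writeType);
  writeEntities.add(we);
  return we;
}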
Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.
In the class TestDbTxnManager, the method testHeartbeater:
@Test
public void testHeartbeater() throws Exception {
  Assert.assertTrue(txnMgr instanceof DbTxnManager);
  addTableInput();
  LockException exception = null;
  QueryPlan qp = new MockQueryPlan(this);
  // Case 1: If there's no delay for the heartbeat, the txn should be able to commit
  txnMgr.openTxn(ctx, "fred");
  // heartbeat started..
  txnMgr.acquireLocks(qp, ctx, "fred");
  runReaper();
  try {
    txnMgr.commitTxn();
  } catch (LockException e) {
    exception = e;
  }
  Assert.assertNull("Txn commit should be successful", exception);
  exception = null;
  // Case 2: If the heartbeat is delayed, but the delay is within the reaper's tolerance,
  // then the txn should still be able to commit.
  // Start the heartbeat after a delay that is shorter than HIVE_TXN_TIMEOUT.
  ((DbTxnManager) txnMgr).openTxn(ctx, "tom", HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) / 2);
  txnMgr.acquireLocks(qp, ctx, "tom");
  runReaper();
  try {
    txnMgr.commitTxn();
  } catch (LockException e) {
    exception = e;
  }
  Assert.assertNull("Txn commit should also be successful", exception);
  exception = null;
  // Case 3: If the heartbeat is delayed long enough to trigger the reaper,
  // then the txn will time out and be aborted.
  // Here we simply never send the heartbeat - an infinite delay.
  // Start the heartbeat after a delay that exceeds HIVE_TXN_TIMEOUT.
  ((DbTxnManager) txnMgr).openTxn(ctx, "jerry", HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) * 2);
  txnMgr.acquireLocks(qp, ctx, "jerry");
  Thread.sleep(HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS));
  runReaper();
  try {
    txnMgr.commitTxn();
  } catch (LockException e) {
    exception = e;
  }
  Assert.assertNotNull("Txn should have been aborted", exception);
  Assert.assertEquals(ErrorMsg.TXN_ABORTED, exception.getCanonicalErrorMsg());
}
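Case 3 is the pattern worth reusing: open a transaction with a heartbeat delay longer than HIVE_TXN_TIMEOUT, wait out the timeout, let the reaper run, and expect commitTxn to fail with TXN_ABORTED. A hedged sketch of extracting that into a helper (the name expectTxnTimeout is hypothetical; the body reuses only calls shown above):

// Hypothetical helper: open a txn whose first heartbeat arrives only after
// the given delay, wait out the timeout, run the reaper, and assert that
// the commit is rejected with TXN_ABORTED.
private void expectTxnTimeout(String user, QueryPlan qp, long heartbeatDelayMs) throws Exception {
  ((DbTxnManager) txnMgr).openTxn(ctx, user, heartbeatDelayMs);
  txnMgr.acquireLocks(qp, ctx, user);
  Thread.sleep(HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS));
  runReaper();
  try {
    txnMgr.commitTxn();
    Assert.fail("Txn should have been aborted by the reaper");
  } catch (LockException e) {
    Assert.assertEquals(ErrorMsg.TXN_ABORTED, e.getCanonicalErrorMsg());
  }
}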
Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.
In the class TestDbTxnManager, the method testSingleWritePartition:
@Test
public void testSingleWritePartition() throws Exception {
  WriteEntity we = addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT);
  QueryPlan qp = new MockQueryPlan(this);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1, TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.commitTxn();
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
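The read path follows the same shape. A minimal sketch of a read-side counterpart, assuming the addTableInput helper already used in testHeartbeater above (the test name testSingleReadTable is illustrative):

// Illustrative read-side analogue: a SELECT-style plan should take exactly
// one lock component and have it released by the commit.
@Test
public void testSingleReadTable() throws Exception {
  addTableInput();
  QueryPlan qp = new MockQueryPlan(this);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1, TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.commitTxn();
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}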
Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.
In the class ATSHook, the method run:
@Override
public void run(final HookContext hookContext) throws Exception {
  final long currentTime = System.currentTimeMillis();
  final HiveConf conf = new HiveConf(hookContext.getConf());
  final QueryState queryState = hookContext.getQueryState();
  final String queryId = queryState.getQueryId();
  final Map<String, Long> durations = new HashMap<String, Long>();
  for (String key : hookContext.getPerfLogger().getEndTimes().keySet()) {
    durations.put(key, hookContext.getPerfLogger().getDuration(key));
  }
  try {
    setupAtsExecutor(conf);
    final String domainId = createOrGetDomain(hookContext);
    executor.submit(new Runnable() {
      @Override
      public void run() {
        try {
          QueryPlan plan = hookContext.getQueryPlan();
          if (plan == null) {
            return;
          }
          String queryId = plan.getQueryId();
          String opId = hookContext.getOperationId();
          long queryStartTime = plan.getQueryStartTime();
          String user = hookContext.getUgi().getShortUserName();
          String requestuser = hookContext.getUserName();
          if (hookContext.getUserName() == null) {
            requestuser = hookContext.getUgi().getUserName();
          }
          int numMrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
          int numTezJobs = Utilities.getTezTasks(plan.getRootTasks()).size();
          if (numMrJobs + numTezJobs <= 0) {
            // ignore client only queries
            return;
          }
          switch (hookContext.getHookType()) {
            case PRE_EXEC_HOOK:
              ExplainConfiguration config = new ExplainConfiguration();
              config.setFormatted(true);
              ExplainWork work = new ExplainWork(
                  null,                 // resFile
                  null,                 // pCtx
                  plan.getRootTasks(),  // RootTasks
                  plan.getFetchTask(),  // FetchTask
                  null,                 // analyzer
                  config,               // explainConfig
                  null);                // cboInfo
              @SuppressWarnings("unchecked")
              ExplainTask explain = (ExplainTask) TaskFactory.get(work, conf);
              explain.initialize(queryState, plan, null, null);
              String query = plan.getQueryStr();
              JSONObject explainPlan = explain.getJSONPlan(null, work);
              String logID = conf.getLogIdVar(hookContext.getSessionId());
              List<String> tablesRead = getTablesFromEntitySet(hookContext.getInputs());
              List<String> tablesWritten = getTablesFromEntitySet(hookContext.getOutputs());
              String executionMode = getExecutionMode(plan).name();
              String hiveInstanceAddress = hookContext.getHiveInstanceAddress();
              if (hiveInstanceAddress == null) {
                hiveInstanceAddress = InetAddress.getLocalHost().getHostAddress();
              }
              String hiveInstanceType = hookContext.isHiveServerQuery() ? "HS2" : "CLI";
              ApplicationId llapId = determineLlapId(conf, plan);
              fireAndForget(createPreHookEvent(queryId, query, explainPlan, queryStartTime,
                  user, requestuser, numMrJobs, numTezJobs, opId, hookContext.getIpAddress(),
                  hiveInstanceAddress, hiveInstanceType, hookContext.getSessionId(), logID,
                  hookContext.getThreadId(), executionMode, tablesRead, tablesWritten, conf,
                  llapId, domainId));
              break;
            case POST_EXEC_HOOK:
              fireAndForget(createPostHookEvent(queryId, currentTime, user, requestuser, true,
                  opId, durations, domainId));
              break;
            case ON_FAILURE_HOOK:
              fireAndForget(createPostHookEvent(queryId, currentTime, user, requestuser, false,
                  opId, durations, domainId));
              break;
            default:
              // ignore
              break;
          }
        } catch (Exception e) {
          LOG.warn("Failed to submit plan to ATS for " + queryId, e);
        }
      }
    });
  } catch (Exception e) {
    LOG.warn("Failed to submit to ATS for " + queryId, e);
  }
}
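For this hook to receive PRE_EXEC_HOOK, POST_EXEC_HOOK, and ON_FAILURE_HOOK callbacks at all, it has to be registered on Hive's hook lists. A minimal sketch of wiring it up through the standard hive.exec.*.hooks properties (shown programmatically here; the same values can equally go in hive-site.xml):

// Register ATSHook for all three hook points so every branch of the
// switch above can fire.
HiveConf conf = new HiveConf();
conf.set("hive.exec.pre.hooks", "org.apache.hadoop.hive.ql.hooks.ATSHook");
conf.set("hive.exec.post.hooks", "org.apache.hadoop.hive.ql.hooks.ATSHook");
conf.set("hive.exec.failure.hooks", "org.apache.hadoop.hive.ql.hooks.ATSHook");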