Use of org.apache.hadoop.hive.metastore.model.MScheduledExecution in project hive by apache.
In the class TestMetastoreScheduledQueries, method testPoll.
@Test
public void testPoll() throws Exception {
  ScheduledQuery schq = createScheduledQuery(new ScheduledQueryKey("q1", "polltest"));
  ScheduledQueryMaintenanceRequest r = new ScheduledQueryMaintenanceRequest();
  r.setType(ScheduledQueryMaintenanceRequestType.CREATE);
  r.setScheduledQuery(schq);
  client.scheduledQueryMaintenance(r);
  schq.setScheduleKey(new ScheduledQueryKey("q1", "polltestOther"));
  client.scheduledQueryMaintenance(r);
  // disabled queries are not considered
  schq.setScheduleKey(new ScheduledQueryKey("q2disabled", "polltest"));
  schq.setEnabled(false);
  client.scheduledQueryMaintenance(r);
  // do some poll requests; and wait for q1's execution
  ScheduledQueryPollRequest request = new ScheduledQueryPollRequest();
  request.setClusterNamespace("polltest");
  ScheduledQueryPollResponse pollResult = null;
  // wait for poll to hit
  for (int i = 0; i < 30; i++) {
    pollResult = client.scheduledQueryPoll(request);
    if (pollResult.isSetQuery()) {
      break;
    }
    Thread.sleep(100);
  }
  assertTrue(pollResult.isSetQuery());
  assertTrue(pollResult.isSetScheduleKey());
  assertTrue(pollResult.isSetExecutionId());
  // after reading the only scheduled query; there are no more queries to run (for 1 sec)
  ScheduledQueryPollResponse pollResult2 = client.scheduledQueryPoll(request);
  assertFalse(pollResult2.isSetQuery());
  try (PersistenceManager pm = PersistenceManagerProvider.getPersistenceManager()) {
    MScheduledExecution q = pm.getObjectById(MScheduledExecution.class, pollResult.getExecutionId());
    assertNotNull(q);
    assertEquals(QueryState.INITED, q.getState());
    assertTrue(q.getStartTime() <= getEpochSeconds());
    assertTrue(q.getStartTime() >= getEpochSeconds() - 1);
    assertNull(q.getEndTime());
    assertTrue(q.getLastUpdateTime() <= getEpochSeconds());
    assertTrue(q.getLastUpdateTime() >= getEpochSeconds() - 1);
  }
  // wait 1 sec
  Thread.sleep(1000);
  ScheduledQueryProgressInfo info;
  info = new ScheduledQueryProgressInfo(pollResult.getExecutionId(), QueryState.EXECUTING, "executor-query-id");
  client.scheduledQueryProgress(info);
  try (PersistenceManager pm = PersistenceManagerProvider.getPersistenceManager()) {
    MScheduledExecution q = pm.getObjectById(MScheduledExecution.class, pollResult.getExecutionId());
    assertEquals(QueryState.EXECUTING, q.getState());
    assertEquals("executor-query-id", q.getExecutorQueryId());
    assertTrue(q.getLastUpdateTime() <= getEpochSeconds());
    assertTrue(q.getLastUpdateTime() >= getEpochSeconds() - 1);
  }
  // wait 1 sec
  Thread.sleep(1000);
  info = new ScheduledQueryProgressInfo(pollResult.getExecutionId(), QueryState.FAILED, "executor-query-id");
  info.setErrorMessage(generateLongErrorMessage());
  client.scheduledQueryProgress(info);
  try (PersistenceManager pm = PersistenceManagerProvider.getPersistenceManager()) {
    MScheduledExecution q = pm.getObjectById(MScheduledExecution.class, pollResult.getExecutionId());
    assertEquals(QueryState.FAILED, q.getState());
    assertEquals("executor-query-id", q.getExecutorQueryId());
    assertNull(q.getLastUpdateTime());
    assertTrue(q.getEndTime() <= getEpochSeconds());
    assertTrue(q.getEndTime() >= getEpochSeconds() - 1);
    assertTrue(q.getErrorMessage().length() < 2000);
    assertFalse(q.getErrorMessage().contains("x"));
  }
  // clustername is taken into account; this should be empty
  request.setClusterNamespace("polltestSomethingElse");
  pollResult = client.scheduledQueryPoll(request);
  assertFalse(pollResult.isSetQuery());
}
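The test above drives the scheduled-query poll/progress API from the client side. For orientation, here is a minimal sketch of how an executor-side loop might use the same calls; the client variable is assumed to be a metastore client as in the test, and runQuery(...) is a hypothetical placeholder for actually running the polled statement.

ScheduledQueryPollRequest req = new ScheduledQueryPollRequest();
req.setClusterNamespace("polltest");
ScheduledQueryPollResponse resp = client.scheduledQueryPoll(req);
if (resp.isSetQuery()) {
  // report that execution has started
  ScheduledQueryProgressInfo progress =
      new ScheduledQueryProgressInfo(resp.getExecutionId(), QueryState.EXECUTING, "executor-query-id");
  client.scheduledQueryProgress(progress);
  try {
    runQuery(resp.getQuery()); // hypothetical: execute the polled query text
    progress = new ScheduledQueryProgressInfo(resp.getExecutionId(), QueryState.FINISHED, "executor-query-id");
  } catch (Exception ex) {
    progress = new ScheduledQueryProgressInfo(resp.getExecutionId(), QueryState.FAILED, "executor-query-id");
    progress.setErrorMessage(ex.getMessage());
  }
  // report the terminal state back to the metastore
  client.scheduledQueryProgress(progress);
}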
Use of org.apache.hadoop.hive.metastore.model.MScheduledExecution in project hive by apache.
In the class TestMetastoreScheduledQueries, method testSkip2.
@Test
public void testSkip2() throws Exception {
  metaStore.getConf().set(MetastoreConf.ConfVars.SCHEDULED_QUERIES_AUTODISABLE_COUNT.getVarname(), "4");
  metaStore.getConf().set(MetastoreConf.ConfVars.SCHEDULED_QUERIES_SKIP_OPPORTUNITIES_AFTER_FAILURES.getVarname(), "2");
  client.close();
  client = metaStore.getClient();
  testDisableInternal(5, 6, "skip2");
  try (PersistenceManager pm = PersistenceManagerProvider.getPersistenceManager()) {
    ScheduledQueryKey key = new ScheduledQueryKey("q1", "skip2");
    Query query = pm.newQuery(MScheduledExecution.class);
    query.setOrdering("scheduledExecutionId descending");
    query.setRange(0, 20);
    List<MScheduledExecution> list = (List<MScheduledExecution>) query.execute();
    List<MScheduledExecution> q1list = new ArrayList<MScheduledExecution>();
    List<Integer> tList = new ArrayList<Integer>();
    for (MScheduledExecution schqExec : list) {
      if (schqExec.getScheduledQuery().getScheduleKey().equals(key)) {
        q1list.add(schqExec);
        tList.add(schqExec.getStartTime());
      }
    }
    tList = Lists.reverse(tList);
    Integer startTime = tList.get(0);
    tList = tList.stream().map(e -> e - startTime).collect(Collectors.toList());
    assertArrayEquals(new Integer[] { 0, 1, 2, 4, 6, 6 }, tList.toArray());
  }
}
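The test fetches the latest executions and filters them in Java against the schedule key. As a design note, the same selection could plausibly be pushed into the JDO filter itself; the field names scheduleName and clusterNamespace on the referenced scheduled-query object are assumptions inferred from the getters used above, so treat this as a sketch rather than the project's approach.

Query query = pm.newQuery(MScheduledExecution.class);
// assumed field names on the referenced MScheduledQuery object
query.setFilter("scheduledQuery.scheduleName == name && scheduledQuery.clusterNamespace == ns");
query.declareParameters("java.lang.String name, java.lang.String ns");
query.setOrdering("scheduledExecutionId descending");
List<MScheduledExecution> q1list = (List<MScheduledExecution>) query.execute("q1", "skip2");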
Use of org.apache.hadoop.hive.metastore.model.MScheduledExecution in project hive by apache.
In the class ObjectStore, method markScheduledExecutionsTimedOut.
@Override
public int markScheduledExecutionsTimedOut(int timeoutSecs) throws InvalidOperationException, MetaException {
  if (timeoutSecs < 0) {
    LOG.debug("scheduled executions - time_out mark is disabled");
    return 0;
  }
  boolean committed = false;
  Query q = null;
  try {
    openTransaction();
    int maxLastUpdateTime = (int) (System.currentTimeMillis() / 1000) - timeoutSecs;
    q = pm.newQuery(MScheduledExecution.class);
    q.setFilter("lastUpdateTime <= maxLastUpdateTime && (state == 'INITED' || state == 'EXECUTING')");
    q.declareParameters("int maxLastUpdateTime");
    List<MScheduledExecution> results = (List<MScheduledExecution>) q.execute(maxLastUpdateTime);
    for (MScheduledExecution e : results) {
      ScheduledQueryProgressInfo info = new ScheduledQueryProgressInfo();
      info.setScheduledExecutionId(e.getScheduledExecutionId());
      info.setState(QueryState.TIMED_OUT);
      info.setErrorMessage("Query stuck in: " + e.getState() + " state for >" + timeoutSecs + " seconds. Execution timed out.");
      scheduledQueryProgress(info);
    }
    recoverInvalidScheduledQueryState(timeoutSecs);
    committed = commitTransaction();
    return results.size();
  } finally {
    rollbackAndCleanup(committed, q);
  }
}
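markScheduledExecutionsTimedOut is meant to be invoked periodically so that executions stuck in INITED or EXECUTING are moved to TIMED_OUT. A minimal sketch of such a housekeeping call, assuming a store handle named rawStore and an arbitrary 120-second timeout (both are assumptions, not taken from the snippet above):

// hypothetical periodic maintenance call; rawStore and the 120-second timeout are assumptions
int timedOut = rawStore.markScheduledExecutionsTimedOut(120);
if (timedOut > 0) {
  LOG.info("Marked " + timedOut + " scheduled execution(s) as TIMED_OUT");
}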