Usage example of org.apache.hadoop.hive.metastore.api.ScheduledQuery._Fields in the Apache Hive project.
From class TestMetastoreScheduledQueries, method testNormalDelete:
@Test
public void testNormalDelete() throws Exception {
  // Register a scheduled query, then remove it again; neither call should throw.
  ScheduledQuery scheduledQuery = createScheduledQuery(createKey("q1", "nsdel"));
  ScheduledQueryMaintenanceRequest request = new ScheduledQueryMaintenanceRequest();
  request.setScheduledQuery(scheduledQuery);
  request.setType(ScheduledQueryMaintenanceRequestType.CREATE);
  client.scheduledQueryMaintenance(request);
  // Reuse the same request object to drop the query that was just created.
  request.setType(ScheduledQueryMaintenanceRequestType.DROP);
  client.scheduledQueryMaintenance(request);
}
Usage example of org.apache.hadoop.hive.metastore.api.ScheduledQuery._Fields in the Apache Hive project.
From class ColumnStatsMergerFactory, method getColumnStatsMerger:
/**
 * Get a statistics merger to merge the given statistics objects.
 *
 * @param statsObjNew A statistics object to merge
 * @param statsObjOld A statistics object to merge
 * @return A ColumnStatsMerger object that can process the requested type
 * @throws IllegalArgumentException if the column statistics objects are of
 *           two different types or if they are of an unknown type
 * @throws NullPointerException if either statistics object is {@code null}
 */
public static ColumnStatsMerger getColumnStatsMerger(final ColumnStatisticsObj statsObjNew, final ColumnStatisticsObj statsObjOld) {
  Objects.requireNonNull(statsObjNew, "Column 1 statistics cannot be null");
  Objects.requireNonNull(statsObjOld, "Column 2 statistics cannot be null");
  final _Fields typeNew = statsObjNew.getStatsData().getSetField();
  final _Fields typeOld = statsObjOld.getStatsData().getSetField();
  // Both sides must carry the same kind of statistics; a merger is type-specific.
  Preconditions.checkArgument(typeNew == typeOld, "The column types must match: [" + typeNew + "::" + typeOld + "]");
  switch(typeNew) {
  case BOOLEAN_STATS:
    return new BooleanColumnStatsMerger();
  case LONG_STATS:
    return new LongColumnStatsMerger();
  case DOUBLE_STATS:
    return new DoubleColumnStatsMerger();
  case STRING_STATS:
    return new StringColumnStatsMerger();
  case BINARY_STATS:
    return new BinaryColumnStatsMerger();
  case DECIMAL_STATS:
    return new DecimalColumnStatsMerger();
  case DATE_STATS:
    return new DateColumnStatsMerger();
  case TIMESTAMP_STATS:
    return new TimestampColumnStatsMerger();
  default:
    // Reuse the already-extracted field type instead of re-reading it from the object.
    throw new IllegalArgumentException("Unknown stats type: " + typeNew);
  }
}
Usage example of org.apache.hadoop.hive.metastore.api.ScheduledQuery._Fields in the Apache Hive project.
From class TestMetastoreScheduledQueries, method testCreateWithInvalidSchedule:
@Test(expected = InvalidInputException.class)
public void testCreateWithInvalidSchedule() throws Exception {
  // A schedule string that is not a valid cron expression must be rejected
  // by the metastore with InvalidInputException.
  ScheduledQuery scheduledQuery = createScheduledQuery(createKey("createInvalidSch", "c1"));
  scheduledQuery.setSchedule("asd asd");
  ScheduledQueryMaintenanceRequest request = new ScheduledQueryMaintenanceRequest();
  request.setScheduledQuery(scheduledQuery);
  request.setType(ScheduledQueryMaintenanceRequestType.CREATE);
  client.scheduledQueryMaintenance(request);
}
Usage example of org.apache.hadoop.hive.metastore.api.ScheduledQuery._Fields in the Apache Hive project.
From class TestMetastoreScheduledQueries, method testOutdatedCleanup:
@Test
public void testOutdatedCleanup() throws Exception {
  final String namespace = "outdatedcleanup";
  ObjectStore store = new ObjectStore();
  store.setConf(metaStore.getConf());
  // Start from a clean slate: purge any previously recorded executions.
  store.deleteScheduledExecutions(0);
  ScheduledQuery scheduledQuery = createScheduledQuery(new ScheduledQueryKey("q1", namespace));
  ScheduledQueryMaintenanceRequest maintenance = new ScheduledQueryMaintenanceRequest();
  maintenance.setType(ScheduledQueryMaintenanceRequestType.CREATE);
  maintenance.setScheduledQuery(scheduledQuery);
  store.scheduledQueryMaintenance(maintenance);
  Thread.sleep(1000);
  // Polling picks up q1 and records an execution for it.
  ScheduledQueryPollResponse polled = store.scheduledQueryPoll(new ScheduledQueryPollRequest(namespace));
  Thread.sleep(1000);
  // With a zero timeout, the just-started execution is considered stale.
  store.markScheduledExecutionsTimedOut(0);
  try (PersistenceManager pm = PersistenceManagerProvider.getPersistenceManager()) {
    MScheduledExecution recorded = pm.getObjectById(MScheduledExecution.class, polled.getExecutionId());
    assertEquals(QueryState.TIMED_OUT, recorded.getState());
  }
}
Usage example of org.apache.hadoop.hive.metastore.api.ScheduledQuery._Fields in the Apache Hive project.
From class ObjectStore, method processScheduledQueryPolicies:
/**
 * Applies failure policies after a scheduled-query execution finishes in a
 * FAILED or TIMED_OUT state: optionally auto-disables the query after a
 * configured number of consecutive failures, and optionally skips upcoming
 * execution opportunities.
 *
 * @param info progress info of the execution that just completed
 * @throws MetaException if the next-execution time cannot be computed
 */
private void processScheduledQueryPolicies(ScheduledQueryProgressInfo info) throws MetaException {
  // Policies only react to failures; successful/other states need no handling.
  if (info.getState() != QueryState.FAILED && info.getState() != QueryState.TIMED_OUT) {
    return;
  }
  int autoDisableCount = MetastoreConf.getIntVar(conf, ConfVars.SCHEDULED_QUERIES_AUTODISABLE_COUNT);
  int skipCount = MetastoreConf.getIntVar(conf, ConfVars.SCHEDULED_QUERIES_SKIP_OPPORTUNITIES_AFTER_FAILURES);
  // We only need to inspect the most recent max(autoDisableCount, skipCount) executions.
  int lastN = Math.max(autoDisableCount, skipCount);
  if (lastN <= 0) {
    // both policies disabled
    return;
  }
  boolean commited = false;
  Query query = null;
  try {
    openTransaction();
    MScheduledExecution lastExecution = pm.getObjectById(MScheduledExecution.class, info.getScheduledExecutionId());
    MScheduledQuery schq = lastExecution.getScheduledQuery();
    // Fetch the most recent executions of this schedule, newest first.
    query = pm.newQuery(MScheduledExecution.class);
    query.setFilter("scheduledQuery == currentSchedule");
    query.setOrdering("scheduledExecutionId descending");
    query.declareParameters("MScheduledQuery currentSchedule");
    query.setRange(0, lastN);
    List<MScheduledExecution> list = (List<MScheduledExecution>) query.execute(schq);
    // Count the run of consecutive failures ending at the latest execution.
    int failureCount = 0;
    for (int i = 0; i < list.size(); i++) {
      if (list.get(i).getState() != QueryState.FAILED && list.get(i).getState() != QueryState.TIMED_OUT) {
        break;
      }
      failureCount++;
    }
    if (autoDisableCount > 0 && autoDisableCount <= failureCount) {
      LOG.info("Disabling {} after {} consecutive failures", schq.getScheduleKey(), autoDisableCount);
      schq.setEnabled(false);
      int now = (int) (System.currentTimeMillis() / 1000);
      // Record a synthetic AUTO_DISABLED execution so the disable action is auditable.
      MScheduledExecution execution = new MScheduledExecution();
      execution.setScheduledQuery(schq);
      execution.setState(QueryState.AUTO_DISABLED);
      execution.setStartTime(now);
      execution.setEndTime(now);
      execution.setLastUpdateTime(now);
      // BUGFIX: String.format requires %d, not the SLF4J-style {} placeholder —
      // the old message stored a literal "{}" and never included the count.
      execution.setErrorMessage(String.format("Disabling query after %d consecutive failures", autoDisableCount));
      pm.makePersistent(execution);
    }
    if (skipCount > 0) {
      // Skip one opportunity per failure beyond the first, capped at skipCount.
      int n = Math.min(skipCount, failureCount) - 1;
      Integer scheduledTime = schq.getNextExecution();
      for (int i = 0; i < n; i++) {
        if (scheduledTime != null) {
          scheduledTime = computeNextExecutionTime(schq.getSchedule(), scheduledTime);
        }
      }
      if (scheduledTime != null) {
        schq.setNextExecution(scheduledTime);
      }
    }
    commited = commitTransaction();
  } catch (InvalidInputException e) {
    throw new MetaException("Unexpected InvalidInputException: " + e.getMessage());
  } finally {
    rollbackAndCleanup(commited, query);
  }
}
Aggregations