Usage of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class ObjectStore, method alterResourcePlan:
/**
 * Alters an existing resource plan, either by applying the given field changes in place
 * or (when {@code isReplace} is set) by replacing another plan with this one.
 *
 * @param name plan to alter; normalized here, may be null for some non-replace alters
 * @param ns workload management namespace
 * @param changes fields to change on the plan
 * @param canActivateDisabled whether a disabled plan may be activated by this call
 * @param canDeactivate whether the currently active plan may be deactivated
 * @param isReplace true to replace another plan with the plan named {@code name}
 * @return the full resource plan, but only when this call activates a plan; null otherwise
 */
@Override
public WMFullResourcePlan alterResourcePlan(String name, String ns, WMNullableResourcePlan changes, boolean canActivateDisabled, boolean canDeactivate, boolean isReplace) throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
  name = (name == null) ? null : normalizeIdentifier(name);
  if (name == null && isReplace) {
    throw new InvalidOperationException("Cannot replace without specifying the source plan");
  }
  boolean committed = false;
  Query query = null;
  // The result is only populated when a plan gets activated; callers could pass a flag
  // if they ever need the result in other cases.
  WMFullResourcePlan activatedPlan = null;
  try {
    openTransaction();
    activatedPlan = isReplace
        ? handleAlterReplace(name, ns, changes)
        : handleSimpleAlter(name, ns, changes, canActivateDisabled, canDeactivate);
    committed = commitTransaction();
    return activatedPlan;
  } catch (Exception e) {
    // Translate unique-constraint violations into a friendlier error before rethrowing.
    checkForConstraintException(e, "Resource plan name should be unique: ");
    throw e;
  } finally {
    rollbackAndCleanup(committed, query);
  }
}
Usage of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class ObjectStore, method handleAlterReplace:
/**
 * Implements the "replace" flavor of alterResourcePlan: the plan {@code name} takes over
 * the identity (name and status) of the plan being replaced, while the replaced plan is
 * renamed to a generated backup name and disabled.
 *
 * Exactly one of the two targeting modes must be used: either {@code changes} names the
 * plan to replace, or {@code changes} requests ACTIVE status (meaning: replace whatever
 * plan is currently active).
 *
 * @param name source plan whose contents take over; must not be null
 * @param ns workload management namespace; defaulted if null
 * @param changes must carry only the replace target (name XOR ACTIVE status), no other fields
 * @return the full resource plan when the surviving plan ends up ACTIVE; null otherwise
 * @throws InvalidOperationException on inconsistent arguments, self-replace, or namespace mismatch
 */
private WMFullResourcePlan handleAlterReplace(String name, String ns, WMNullableResourcePlan changes) throws InvalidOperationException, NoSuchObjectException, MetaException {
  // Verify that field changes are consistent with what Hive does. Note: we could handle this.
  if (changes.isSetQueryParallelism() || changes.isSetDefaultPoolPath()) {
    throw new InvalidOperationException("Cannot change values during replace.");
  }
  // Replace targets either a specific named plan, or the currently active plan — never both, never neither.
  boolean isReplacingSpecific = changes.isSetName();
  boolean isReplacingActive = (changes.isSetStatus() && changes.getStatus() == WMResourcePlanStatus.ACTIVE);
  if (isReplacingActive == isReplacingSpecific) {
    throw new InvalidOperationException("Must specify a name, or the active plan; received " + changes.getName() + ", " + (changes.isSetStatus() ? changes.getStatus() : null));
  }
  if (name == null) {
    throw new InvalidOperationException("Invalid replace - no name specified");
  }
  ns = getNsOrDefault(ns);
  MWMResourcePlan replacedPlan = isReplacingSpecific ? getMWMResourcePlan(changes.getName(), ns, false) : getActiveMWMResourcePlan(ns);
  MWMResourcePlan plan = getMWMResourcePlan(name, ns, false);
  if (replacedPlan.getName().equals(plan.getName())) {
    throw new InvalidOperationException("A plan cannot replace itself");
  }
  // Both plans must live in the same namespace; a replace must never move a plan across namespaces.
  String oldNs = getNsOrDefault(replacedPlan.getNs()), newNs = getNsOrDefault(plan.getNs());
  if (!oldNs.equals(newNs)) {
    throw new InvalidOperationException("Cannot change the namespace; replacing " + oldNs + " with " + newNs);
  }
  // We will inherit the name and status from the plan we are replacing.
  String newName = replacedPlan.getName();
  // Find a free backup name for the replaced plan by probing generated candidates.
  int i = 0;
  String copyName = generateOldPlanName(newName, i);
  while (true) {
    MWMResourcePlan dup = getMWMResourcePlan(copyName, ns, false, false);
    if (dup == null) {
      break;
    }
    // Note: this can still conflict with parallel transactions. We do not currently handle
    // parallel changes from two admins (by design :().
    copyName = generateOldPlanName(newName, ++i);
  }
  // Order matters: rename the replaced plan out of the way first, then let the new plan
  // take over its original name and status, and finally disable the renamed backup.
  replacedPlan.setName(copyName);
  plan.setName(newName);
  plan.setStatus(replacedPlan.getStatus());
  replacedPlan.setStatus(MWMResourcePlan.Status.DISABLED);
  // TODO: add a configurable option to skip the history and just drop it?
  return plan.getStatus() == Status.ACTIVE ? fullFromMResourcePlan(plan) : null;
}
Usage of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class ObjectStore, method scheduledQueryProgress:
/**
 * Records progress of a scheduled query execution: validates the state transition,
 * updates the execution record (state, executor query id, error message, timestamps),
 * and on terminal states detaches the execution from its owning scheduled query.
 *
 * @param info progress report for one scheduled execution
 * @throws InvalidOperationException on an illegal state transition or unknown state
 */
@Override
public void scheduledQueryProgress(ScheduledQueryProgressInfo info) throws InvalidOperationException, MetaException {
  ensureScheduledQueriesEnabled();
  boolean committed = false;
  try {
    openTransaction();
    MScheduledExecution execution =
        pm.getObjectById(MScheduledExecution.class, info.getScheduledExecutionId());
    if (!validateStateChange(execution.getState(), info.getState())) {
      throw new InvalidOperationException(
          "Invalid state change: " + execution.getState() + "=>" + info.getState());
    }
    execution.setState(info.getState());
    if (info.isSetExecutorQueryId()) {
      execution.setExecutorQueryId(info.getExecutorQueryId());
    }
    if (info.isSetErrorMessage()) {
      // Error messages are capped so oversized failures don't blow up the backing column.
      execution.setErrorMessage(abbreviateErrorMessage(info.getErrorMessage(), 1000));
    }
    int nowEpochSec = (int) (System.currentTimeMillis() / 1000);
    switch (info.getState()) {
      case INITED:
      case EXECUTING:
        // Still running: just refresh the heartbeat timestamp.
        execution.setLastUpdateTime(nowEpochSec);
        break;
      case FAILED:
      case FINISHED:
      case TIMED_OUT:
        // Terminal state: stamp the end time, clear the heartbeat, and detach
        // this execution from the scheduled query's "active execution" slot.
        execution.setEndTime(nowEpochSec);
        execution.setLastUpdateTime(null);
        execution.getScheduledQuery().setActiveExecution(null);
        break;
      default:
        throw new InvalidOperationException("invalid state: " + info.getState());
    }
    pm.makePersistent(execution);
    processScheduledQueryPolicies(info);
    committed = commitTransaction();
  } finally {
    if (!committed) {
      rollbackTransaction();
    }
  }
}
Usage of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class ObjectStore, method getTriggersForResourcePlan:
/**
 * Returns all workload management triggers attached to the given resource plan.
 *
 * @param resourcePlanName name of the resource plan (must exist)
 * @param ns workload management namespace; null means the default namespace
 * @return the plan's triggers; never null (empty list when the plan has none)
 * @throws NoSuchObjectException if the resource plan does not exist
 * @throws MetaException on metastore access errors
 */
@Override
public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName, String ns) throws NoSuchObjectException, MetaException {
  // Fix: use the diamond operator instead of a raw ArrayList; the raw type silently
  // disables generic type checking on the result list.
  List<WMTrigger> triggers = new ArrayList<>();
  boolean commited = false;
  Query query = null;
  try {
    openTransaction();
    MWMResourcePlan resourcePlan;
    try {
      resourcePlan = getMWMResourcePlan(resourcePlanName, ns, false);
    } catch (InvalidOperationException e) {
      // Should not happen: the edit check (third argument) is false, so the lookup
      // cannot fail with an edit-in-progress error.
      throw new RuntimeException(e);
    }
    query = pm.newQuery(MWMTrigger.class, "resourcePlan == rp");
    query.declareParameters("MWMResourcePlan rp");
    List<MWMTrigger> mTriggers = (List<MWMTrigger>) query.execute(resourcePlan);
    // Force-load all fields before the transaction (and thus the JDO query) is closed.
    pm.retrieveAll(mTriggers);
    commited = commitTransaction();
    if (mTriggers != null) {
      for (MWMTrigger trigger : mTriggers) {
        triggers.add(fromMWMTrigger(trigger, resourcePlanName));
      }
    }
  } finally {
    rollbackAndCleanup(commited, query);
  }
  return triggers;
}
Usage of org.apache.hadoop.hive.metastore.api.InvalidOperationException in the Apache Hive project.
Class ObjectStore, method createResourcePlan:
/**
 * Creates a new resource plan, always starting out DISABLED. When {@code copyFromName}
 * is given, the new plan's contents (pools, triggers, mappings) are copied from that
 * plan; otherwise an optional "default" pool is created when {@code defaultPoolSize} is
 * positive.
 *
 * @param resourcePlan plan definition (name, namespace, optional query parallelism)
 * @param copyFromName existing plan to clone contents from, or null to create fresh
 * @param defaultPoolSize allocation for the auto-created default pool (ignored when copying)
 * @throws InvalidObjectException on an empty name or non-positive query parallelism
 * @throws NoSuchObjectException if the copy-from plan does not exist
 */
@Override
public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName, int defaultPoolSize) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException {
  boolean committed = false;
  final String rpName = normalizeIdentifier(resourcePlan.getName());
  if (rpName.isEmpty()) {
    throw new InvalidObjectException("Resource name cannot be empty.");
  }
  // Query parallelism only applies to a freshly created plan; a copied plan inherits
  // its settings from the source plan instead.
  Integer queryParallelism = null;
  if (copyFromName == null && resourcePlan.isSetQueryParallelism()) {
    queryParallelism = resourcePlan.getQueryParallelism();
    if (queryParallelism <= 0) {
      throw new InvalidObjectException("Query parallelism should be positive.");
    }
  }
  MWMResourcePlan rp = new MWMResourcePlan(rpName, queryParallelism, Status.DISABLED);
  rp.setNs(resourcePlan.getNs());
  try {
    openTransaction();
    pm.makePersistent(rp);
    if (copyFromName == null) {
      // Fresh plan: optionally seed it with a "default" pool. Note that there is
      // currently no RawStore method to create a pool separately.
      if (defaultPoolSize > 0) {
        MWMPool defaultPool = new MWMPool(rp, "default", 1.0, defaultPoolSize, null);
        pm.makePersistent(defaultPool);
        rp.setPools(Sets.newHashSet(defaultPool));
        rp.setDefaultPool(defaultPool);
      }
    } else {
      String ns = getNsOrDefault(resourcePlan.getNs());
      MWMResourcePlan copyFrom = getMWMResourcePlan(copyFromName, ns, false);
      if (copyFrom == null) {
        throw new NoSuchObjectException(copyFromName + " in " + ns);
      }
      copyRpContents(rp, copyFrom);
    }
    committed = commitTransaction();
  } catch (InvalidOperationException e) {
    // The lookup above runs with the edit check disabled, so this cannot legitimately
    // happen; wrap it as a programming error.
    throw new RuntimeException(e);
  } catch (Exception e) {
    // Translate unique-constraint violations into a friendlier error before rethrowing.
    checkForConstraintException(e, "Resource plan already exists: ");
    throw e;
  } finally {
    if (!committed) {
      rollbackTransaction();
    }
  }
}
Aggregations