Use of org.apache.hadoop.hive.metastore.model.MWMMapping in project hive by apache.
The class ObjectStore, method createOrUpdateWMMapping. This method either persists a new workload-management mapping or, in update mode, looks up the existing mapping for the given resource plan, entity type, and entity name and repoints it at the requested pool.
@Override
public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
    throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
  EntityType entityType = EntityType.valueOf(mapping.getEntityType().trim().toUpperCase());
  String entityName = normalizeIdentifier(mapping.getEntityName());
  boolean commited = false;
  Query query = null;
  try {
    openTransaction();
    MWMResourcePlan resourcePlan = getMWMResourcePlan(mapping.getResourcePlanName(), true);
    MWMPool pool = null;
    if (mapping.isSetPoolPath()) {
      pool = getPool(resourcePlan, mapping.getPoolPath());
    }
    if (!update) {
      // Create: persist a new mapping attached to the (editable) resource plan.
      MWMMapping mMapping = new MWMMapping(resourcePlan, entityType, entityName, pool, mapping.getOrdering());
      pm.makePersistent(mMapping);
    } else {
      // Update: find the existing mapping by plan, entity type, and entity name,
      // then repoint it at the (possibly null) pool.
      query = pm.newQuery(MWMMapping.class, "resourcePlan == rp && entityType == type && entityName == name");
      query.declareParameters("MWMResourcePlan rp, java.lang.String type, java.lang.String name");
      query.setUnique(true);
      MWMMapping mMapping = (MWMMapping) query.execute(resourcePlan, entityType.toString(), entityName);
      mMapping.setPool(pool);
    }
    commited = commitTransaction();
  } finally {
    rollbackAndCleanup(commited, query);
  }
}
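For context, a minimal caller sketch is shown below. It assumes an already-initialized RawStore (for example an ObjectStore wired to a PersistenceManager) and the standard Thrift-generated setters on org.apache.hadoop.hive.metastore.api.WMMapping; the plan, user, and pool names are illustrative only.

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.WMMapping;

public class WMMappingCallerSketch {
  // Illustrative only: map a user to a pool in a hypothetical resource plan.
  static void mapUserToPool(RawStore store) throws Exception {
    WMMapping mapping = new WMMapping();
    mapping.setResourcePlanName("plan1");  // hypothetical resource plan name
    mapping.setEntityType("USER");         // parsed into the model EntityType enum above
    mapping.setEntityName("hive_user");    // hypothetical entity name
    mapping.setPoolPath("root.bi");        // optional; when unset, the update branch above sets a null pool
    // false = create a new mapping; true = update the pool of an existing one
    store.createOrUpdateWMMapping(mapping, false);
  }
}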
Use of org.apache.hadoop.hive.metastore.model.MWMMapping in project hive by apache.
The class ObjectStore, method copyRpContents. This method clones the contents of one resource plan into another: it copies the pools while recording which trigger names reference each new pool, copies the mappings against the new pools, and finally copies the triggers and restores the pool-trigger links.
private void copyRpContents(MWMResourcePlan dest, MWMResourcePlan src) {
  dest.setQueryParallelism(src.getQueryParallelism());
  Map<String, MWMPool> pools = new HashMap<>();
  Map<String, Set<MWMPool>> triggersToPools = new HashMap<>();
  // First pass: copy the pools and remember, per trigger name, which new pools
  // must be linked to the copied trigger later.
  for (MWMPool copyPool : src.getPools()) {
    MWMPool pool = new MWMPool(dest, copyPool.getPath(), copyPool.getAllocFraction(),
        copyPool.getQueryParallelism(), copyPool.getSchedulingPolicy());
    pm.makePersistent(pool);
    pools.put(copyPool.getPath(), pool);
    if (copyPool.getTriggers() != null) {
      for (MWMTrigger trigger : copyPool.getTriggers()) {
        Set<MWMPool> p2t = triggersToPools.get(trigger.getName());
        if (p2t == null) {
          p2t = new HashSet<>();
          triggersToPools.put(trigger.getName(), p2t);
        }
        p2t.add(pool);
        pool.setTriggers(new HashSet<>());
      }
    }
  }
  dest.setPools(new HashSet<>(pools.values()));
  if (src.getDefaultPool() != null) {
    dest.setDefaultPool(pools.get(src.getDefaultPool().getPath()));
  }
  // Copy the mappings, repointing each one at the corresponding copied pool.
  Set<MWMMapping> mappings = new HashSet<>();
  for (MWMMapping copyMapping : src.getMappings()) {
    MWMPool pool = null;
    if (copyMapping.getPool() != null) {
      pool = pools.get(copyMapping.getPool().getPath());
    }
    MWMMapping mapping = new MWMMapping(dest, copyMapping.getEntityType(),
        copyMapping.getEntityName(), pool, copyMapping.getOrdering());
    pm.makePersistent(mapping);
    mappings.add(mapping);
  }
  dest.setMappings(mappings);
  // Second pass: copy the triggers and wire up the pool-trigger associations
  // recorded in the first pass.
  Set<MWMTrigger> triggers = new HashSet<>();
  for (MWMTrigger copyTrigger : src.getTriggers()) {
    Set<MWMPool> p2t = triggersToPools.get(copyTrigger.getName());
    if (p2t == null) {
      p2t = new HashSet<>();
    }
    MWMTrigger trigger = new MWMTrigger(dest, copyTrigger.getName(), copyTrigger.getTriggerExpression(),
        copyTrigger.getActionExpression(), p2t, copyTrigger.getIsInUnmanaged());
    pm.makePersistent(trigger);
    for (MWMPool pool : p2t) {
      pool.getTriggers().add(trigger);
    }
    triggers.add(trigger);
  }
  dest.setTriggers(triggers);
}
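The triggersToPools bookkeeping is the subtle part of this method: the copied MWMTrigger objects do not yet exist during the first loop, so the code records, per trigger name, which new pools the trigger must attach to, and wires the links only after the triggers are persisted. A standalone sketch of that accumulation pattern, using plain Java collections and hypothetical pool and trigger names rather than the metastore model classes, is shown below.

import java.util.*;

public class TriggerPoolIndexSketch {
  // Illustrative only: invert a pool -> trigger-names listing into a
  // trigger-name -> pool-paths index, mirroring the triggersToPools map above.
  static Map<String, Set<String>> indexByTrigger(Map<String, List<String>> poolToTriggers) {
    Map<String, Set<String>> triggerToPools = new HashMap<>();
    for (Map.Entry<String, List<String>> entry : poolToTriggers.entrySet()) {
      for (String triggerName : entry.getValue()) {
        triggerToPools.computeIfAbsent(triggerName, k -> new HashSet<>()).add(entry.getKey());
      }
    }
    return triggerToPools;
  }

  public static void main(String[] args) {
    Map<String, List<String>> poolToTriggers = new HashMap<>();
    poolToTriggers.put("root.bi", Arrays.asList("highMemory", "longRunning"));
    poolToTriggers.put("root.etl", Arrays.asList("longRunning"));
    // Prints a map like {highMemory=[root.bi], longRunning=[root.bi, root.etl]} (set order may vary).
    System.out.println(indexByTrigger(poolToTriggers));
  }
}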
Use of org.apache.hadoop.hive.metastore.model.MWMMapping in project hive by apache.
The class ObjectStore, method fullFromMResourcePlan. This method converts a persistent MWMResourcePlan into a Thrift WMFullResourcePlan, collecting the plan itself plus its pools, pool-trigger pairs, triggers, and mappings.
private WMFullResourcePlan fullFromMResourcePlan(MWMResourcePlan mplan) {
  if (mplan == null) {
    return null;
  }
  WMFullResourcePlan rp = new WMFullResourcePlan();
  rp.setPlan(fromMResourcePlan(mplan));
  for (MWMPool mPool : mplan.getPools()) {
    rp.addToPools(fromMPool(mPool, mplan.getName()));
    for (MWMTrigger mTrigger : mPool.getTriggers()) {
      rp.addToPoolTriggers(new WMPoolTrigger(mPool.getPath(), mTrigger.getName()));
    }
  }
  for (MWMTrigger mTrigger : mplan.getTriggers()) {
    rp.addToTriggers(fromMWMTrigger(mTrigger, mplan.getName()));
  }
  for (MWMMapping mMapping : mplan.getMappings()) {
    rp.addToMappings(fromMMapping(mMapping, mplan.getName()));
  }
  return rp;
}
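A hedged sketch of consuming the WMFullResourcePlan assembled here is shown below. It assumes the standard Thrift-generated getters (getPlan, getPools, getMappings, getPoolTriggers) and that the optional collections may be null when absent; it simply prints the pieces this method fills in.

import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;

public class ResourcePlanDumpSketch {
  // Illustrative only: walk the full plan returned by fullFromMResourcePlan.
  static void dump(WMFullResourcePlan rp) {
    if (rp == null) {
      return;
    }
    System.out.println("plan: " + rp.getPlan().getName());
    if (rp.getPools() != null) {
      for (WMPool pool : rp.getPools()) {
        System.out.println("pool: " + pool.getPoolPath());
      }
    }
    if (rp.getMappings() != null) {
      for (WMMapping mapping : rp.getMappings()) {
        System.out.println("mapping: " + mapping.getEntityType() + "/"
            + mapping.getEntityName() + " -> " + mapping.getPoolPath());
      }
    }
    if (rp.getPoolTriggers() != null) {
      for (WMPoolTrigger poolTrigger : rp.getPoolTriggers()) {
        System.out.println("pool trigger: " + poolTrigger.getPool() + " <- " + poolTrigger.getTrigger());
      }
    }
  }
}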