Use of org.apache.hadoop.hive.metastore.api.WMFullResourcePlan in project hive by apache.
From the class ObjectStore, method getActiveResourcePlan.
@Override
public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
  // Note: fullFromMResourcePlan must run inside the transaction (lazy JDO fields),
  // otherwise this could have been deduplicated with getActiveMWMResourcePlan.
  WMFullResourcePlan fullPlan = null;
  Query query = null;
  boolean committed = false;
  try {
    openTransaction();
    // Look up the single resource plan whose status is ACTIVE.
    query = pm.newQuery(MWMResourcePlan.class, "status == activeStatus");
    query.declareParameters("java.lang.String activeStatus");
    query.setUnique(true);
    MWMResourcePlan activePlan = (MWMResourcePlan) query.execute(Status.ACTIVE.toString());
    if (activePlan != null) {
      // Convert to the thrift object while the txn is still open.
      fullPlan = fullFromMResourcePlan(activePlan);
    }
    committed = commitTransaction();
  } finally {
    // Rolls back if commit did not happen and closes the query in either case.
    rollbackAndCleanup(committed, query);
  }
  return fullPlan;
}
Use of org.apache.hadoop.hive.metastore.api.WMFullResourcePlan in project hive by apache.
From the class TestJsonRPFormatter, method testJsonRPFormatter.
@Test
public void testJsonRPFormatter() throws Exception {
  // Build a plan with two pools; pool1 carries a trigger, pool2 carries two user mappings.
  WMFullResourcePlan rp = createRP("test_rp_2", 10, "def");
  addPool(rp, "pool1", 0.3, 3, "fair");
  addTrigger(rp, "trigger1", "KILL", "BYTES > 2", "pool1");
  addPool(rp, "pool2", 0.7, 7, "fcfs");
  addMapping(rp, "user", "foo", "pool2");
  addMapping(rp, "user", "bar", "pool2");
  formatter.showFullResourcePlan(out, rp);
  out.flush();

  // Parse the formatter output back and verify the top-level plan attributes.
  JsonNode root = new ObjectMapper().readTree(bos.toByteArray());
  assertNotNull(root);
  assertTrue(root.isObject());
  assertEquals("test_rp_2", root.get("name").asText());
  assertEquals(10, root.get("parallelism").asInt());
  assertEquals("def", root.get("defaultPool").asText());

  JsonNode pools = root.get("pools");
  assertTrue(pools.isArray());
  assertEquals(2, pools.size());

  // The formatter emits pool2 at index 0 and pool1 at index 1.
  JsonNode second = pools.get(0);
  assertEquals("pool2", second.get("name").asText());
  assertEquals("fcfs", second.get("schedulingPolicy").asText());
  assertEquals(7, second.get("parallelism").asInt());
  assertEquals(0.7, second.get("allocFraction").asDouble(), 0.00001);
  assertTrue(second.get("triggers").isArray());
  assertEquals(0, second.get("triggers").size());
  assertTrue(second.get("mappings").isArray());

  // Both users land in pool2 under a single "user" mapping entry, order unspecified.
  JsonNode userMapping = second.get("mappings").get(0);
  assertEquals("user", userMapping.get("type").asText());
  JsonNode values = userMapping.get("values");
  assertTrue(values.isArray());
  assertEquals(2, values.size());
  HashSet<String> mappedUsers = new HashSet<>();
  for (int i = 0; i < values.size(); ++i) {
    mappedUsers.add(values.get(i).asText());
  }
  assertTrue(mappedUsers.contains("foo"));
  assertTrue(mappedUsers.contains("bar"));

  JsonNode first = pools.get(1);
  assertEquals("pool1", first.get("name").asText());
  assertEquals("fair", first.get("schedulingPolicy").asText());
  assertEquals(3, first.get("parallelism").asInt());
  assertEquals(0.3, first.get("allocFraction").asDouble(), 0.00001);
  assertTrue(first.get("triggers").isArray());
  assertEquals(1, first.get("triggers").size());

  // The lone trigger on pool1 keeps its name, action and expression.
  JsonNode killTrigger = first.get("triggers").get(0);
  assertEquals("trigger1", killTrigger.get("name").asText());
  assertEquals("KILL", killTrigger.get("action").asText());
  assertEquals("BYTES > 2", killTrigger.get("trigger").asText());
}
Use of org.apache.hadoop.hive.metastore.api.WMFullResourcePlan in project hive by apache.
From the class TestJsonRPFormatter, method createRP.
/**
 * Builds an ACTIVE resource plan with no pools for the JSON formatter tests.
 * Query parallelism and default pool path are set only when non-null.
 */
private WMFullResourcePlan createRP(String name, Integer parallelism, String defaultPoolPath) {
  WMResourcePlan plan = new WMResourcePlan(name);
  plan.setStatus(WMResourcePlanStatus.ACTIVE);
  if (parallelism != null) {
    plan.setQueryParallelism(parallelism);
  }
  if (defaultPoolPath != null) {
    plan.setDefaultPoolPath(defaultPoolPath);
  }
  return new WMFullResourcePlan(plan, new ArrayList<>());
}
Use of org.apache.hadoop.hive.metastore.api.WMFullResourcePlan in project hive by apache.
From the class TestJsonRPFormatter, method testJsonEmptyRPFormatter.
@Test
public void testJsonEmptyRPFormatter() throws Exception {
  // A plan with no parallelism and no default pool should serialize those
  // fields as JSON nulls and produce an empty pools array.
  WMFullResourcePlan emptyPlan = createRP("test_rp_1", null, null);
  formatter.showFullResourcePlan(out, emptyPlan);
  out.flush();

  JsonNode root = new ObjectMapper().readTree(bos.toByteArray());
  assertNotNull(root);
  assertTrue(root.isObject());
  assertEquals("test_rp_1", root.get("name").asText());
  assertTrue(root.get("parallelism").isNull());
  assertTrue(root.get("defaultPool").isNull());
  JsonNode pools = root.get("pools");
  assertTrue(pools.isArray());
  assertEquals(0, pools.size());
}
Use of org.apache.hadoop.hive.metastore.api.WMFullResourcePlan in project hive by apache.
From the class TestWorkloadManager, method testApplyPlanQpChanges.
// Verifies that applying a new resource plan resizes/removes pools, kills displaced
// sessions, re-queues waiting queries into their remapped pools, and recomputes
// cluster fractions. Timing is driven by latches and the WM test-event hook.
@Test(timeout = 10000)
public void testApplyPlanQpChanges() throws Exception {
final HiveConf conf = createConf();
MockQam qam = new MockQam();
// Initial plan: pools A(1 slot, 0.35), B(2, 0.15), C(2, 0.3), D(1, 0.3) -> 6 AMs total.
WMFullResourcePlan plan = new WMFullResourcePlan(plan(), Lists.newArrayList(pool("A", 1, 0.35f), pool("B", 2, 0.15f), pool("C", 2, 0.3f), pool("D", 1, 0.3f)));
// Each user maps to the pool of the same name.
plan.setMappings(Lists.newArrayList(mapping("A", "A"), mapping("B", "B"), mapping("C", "C"), mapping("D", "D")));
final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
wm.start();
TezSessionPool<WmTezSession> tezAmPool = wm.getTezAmPool();
assertEquals(6, tezAmPool.getCurrentSize());
// A: 1/1 running, 1 queued; B: 2/2 running, C: 1/2 running, D: 1/1 running, 1 queued.
// Total: 5/6 running.
WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, mappingInput("A"), conf), sessionB1 = (WmTezSession) wm.getSession(null, mappingInput("B"), conf), sessionB2 = (WmTezSession) wm.getSession(null, mappingInput("B"), conf), sessionC1 = (WmTezSession) wm.getSession(null, mappingInput("C"), conf), sessionD1 = (WmTezSession) wm.getSession(null, mappingInput("D"), conf);
// A2 and D2 will block on full pools; background threads request them and park on latches.
final AtomicReference<WmTezSession> sessionA2 = new AtomicReference<>(), sessionD2 = new AtomicReference<>();
final AtomicReference<Throwable> error = new AtomicReference<>();
final CountDownLatch cdl1 = new CountDownLatch(1), cdl2 = new CountDownLatch(1);
Thread t1 = new Thread(new GetSessionRunnable(sessionA2, wm, error, conf, cdl1, "A")), t2 = new Thread(new GetSessionRunnable(sessionD2, wm, error, conf, cdl2, "D"));
// NOTE(review): threads appear to be started inside waitForThreadToBlock — confirm;
// no explicit t1.start()/t2.start() is visible here.
waitForThreadToBlock(cdl1, t1);
waitForThreadToBlock(cdl2, t2);
checkError(error);
// Pre-update fractions match the initial plan's allocations for C and D.
assertEquals(0.3f, sessionC1.getClusterFraction(), EPSILON);
assertEquals(0.3f, sessionD1.getClusterFraction(), EPSILON);
assertEquals(1, tezAmPool.getCurrentSize());
// Change the resource plan - resize B and C down, D up, and remove A remapping users to B.
// Everything will be killed in A and B, C won't change, D will start one more query from
// the queue, and the query queued in A will be re-queued in B and started.
// The fractions will also all change.
// Total: 4/4 running.
plan = new WMFullResourcePlan(plan(), Lists.newArrayList(pool("B", 1, 0.3f), pool("C", 1, 0.2f), pool("D", 2, 0.5f)));
plan.setMappings(Lists.newArrayList(mapping("A", "B"), mapping("B", "B"), mapping("C", "C"), mapping("D", "D")));
wm.updateResourcePlanAsync(plan);
// Barrier: the test event resolves only after the async plan update is processed.
wm.addTestEvent().get();
joinThread(t1);
joinThread(t2);
checkError(error);
// The blocked requests completed: A2 was remapped into B, D2 got D's extra slot.
assertNotNull(sessionA2.get());
assertNotNull(sessionD2.get());
assertEquals("D", sessionD2.get().getPoolName());
assertEquals("B", sessionA2.get().getPoolName());
assertEquals("C", sessionC1.getPoolName());
// New fractions: B=0.3 (single slot), C=0.2, D=0.5 split across two sessions (0.25 each).
assertEquals(0.3f, sessionA2.get().getClusterFraction(), EPSILON);
assertEquals(0.2f, sessionC1.getClusterFraction(), EPSILON);
assertEquals(0.25f, sessionD1.getClusterFraction(), EPSILON);
// Sessions in the removed pool A and the shrunk pool B were killed by WM.
assertKilledByWm(sessionA1);
assertKilledByWm(sessionB1);
assertKilledByWm(sessionB2);
assertEquals(0, tezAmPool.getCurrentSize());
// Wait for another iteration to make sure event gets processed for D2 to receive allocation.
sessionA2.get().returnToSessionManager();
assertEquals(0.25f, sessionD2.get().getClusterFraction(), EPSILON);
// Return itself should be a no-op - the pool went from 6 to 4 with 1 session in the pool.
sessionD2.get().returnToSessionManager();
sessionC1.returnToSessionManager();
sessionD1.returnToSessionManager();
// Try to "return" stuff that was killed from "under" us. Should be a no-op.
sessionA1.returnToSessionManager();
sessionB1.returnToSessionManager();
sessionB2.returnToSessionManager();
assertEquals(4, tezAmPool.getCurrentSize());
}
Aggregations