Example usage of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec in the Apache Hive project: class TaskExecutorService, method getExecutorsStatus.
// TODO Change this method to make the output easier to parse (parse programmatically)
@Override
public Set<String> getExecutorsStatus() {
  // Waiting entries are listed before running ones; LinkedHashSet keeps that order.
  Set<String> result = new LinkedHashSet<>();
  Set<String> running = new LinkedHashSet<>();
  Set<String> waiting = new LinkedHashSet<>();
  StringBuilder sb = new StringBuilder();
  for (Map.Entry<String, TaskWrapper> entry : knownTasks.entrySet()) {
    sb.setLength(0);
    sb.append(entry.getKey());
    TaskWrapper wrapper = entry.getValue();
    TaskRunnerCallable callable = wrapper.getTaskRunnerCallable();
    boolean openedParen = false;
    if (callable != null && callable.getVertexSpec() != null) {
      // Prefix the state with "queryId/vertexName" when the vertex spec is known.
      SignableVertexSpec spec = callable.getVertexSpec();
      sb.append(" (").append(callable.getQueryId()).append("/").append(spec.getVertexName());
      openedParen = true;
    }
    sb.append(openedParen ? ", " : " (");
    boolean isWaiting;
    if (wrapper.isInWaitQueue()) {
      isWaiting = true;
      sb.append("in queue");
    } else if (callable != null) {
      isWaiting = false;
      long startTime = callable.getStartTime();
      if (startTime != 0) {
        // sdf is a ThreadLocal<SimpleDateFormat>-style holder; format per thread.
        sb.append("started at ").append(sdf.get().format(new Date(startTime)));
      } else {
        sb.append("not started");
      }
    } else {
      isWaiting = true;
      sb.append("has no callable");
    }
    if (wrapper.isInPreemptionQueue()) {
      sb.append(", preemptable");
    }
    sb.append(")");
    (isWaiting ? waiting : running).add(sb.toString());
  }
  result.addAll(waiting);
  result.addAll(running);
  return result;
}
Example usage of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec in the Apache Hive project: class TaskExecutorService, method describeTask.
/**
 * Appends a human-readable description of the given task to {@code value}
 * (the buffer is cleared on entry). The description contains the attempt id
 * and, when available, query id/vertex name, guaranteed flag, dag name,
 * queue/run state, preemption-queue membership and finishability.
 *
 * @param value scratch buffer receiving the description; reset before use
 * @param attemptId the task attempt identifier to print
 * @param task the task being described
 * @param fromQueue true when the caller is iterating the wait queue in order
 * @return true when the task is waiting (queued or without a callable), false when running
 */
private boolean describeTask(StringBuilder value, String attemptId, TaskWrapper task, boolean fromQueue) {
  value.setLength(0);
  boolean isFirst = true;
  TaskRunnerCallable c = task.getTaskRunnerCallable();
  value.append(attemptId);
  if (c != null && c.getVertexSpec() != null) {
    SignableVertexSpec fs = c.getVertexSpec();
    value.append(isFirst ? " (" : ", ").append(c.getQueryId()).append("/").append(fs.getVertexName())
        .append(c.isGuaranteed() ? ", guaranteed" : "");
    if (fs.getDagName() != null) {
      value.append(", dagName ").append(fs.getDagName());
    }
    isFirst = false;
  }
  value.append(isFirst ? " (" : ", ");
  if (fromQueue) {
    value.append("in queue (in order)");
  }
  boolean isWaiting;
  if (task.isInWaitQueue()) {
    isWaiting = true;
    if (!fromQueue) {
      value.append("in queue (not in order)");
    }
  } else if (c != null) {
    long startTime = c.getStartTime();
    isWaiting = false;
    if (startTime != 0) {
      value.append("started at ").append(sdf.get().format(new Date(startTime)));
    } else {
      value.append("not started");
    }
  } else {
    isWaiting = true;
    value.append("has no callable");
  }
  if (task.isInPreemptionQueue()) {
    value.append(", ").append("in preemption queue");
  }
  // BUG FIX: the original dereferenced c unconditionally here, which throws an
  // NPE on the "has no callable" path above. Only report finishability when a
  // callable actually exists.
  if (c != null) {
    boolean canFinish = c.canFinish();
    value.append(", ").append(canFinish ? "can" : "cannot").append(" finish");
    if (canFinish != c.canFinishForPriority()) {
      value.append(" (not updated in queue)");
    }
  }
  value.append(")");
  return isWaiting;
}
Example usage of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec in the Apache Hive project: class TaskExecutorService, method canPreempt.
/**
 * Decides whether a running task (victim) should be preempted in favor of a
 * candidate task. Preemption is only possible when the two tasks belong to
 * different vertices (different hive query id or vertex index), and either:
 * (1) the candidate is guaranteed while the victim is not, or
 * (2) both carry the same guaranteed flag, the candidate is finishable and the
 * victim is not.
 * The victim's finishability is re-evaluated through its callable
 * (QueryFragmentInfo.canFinish) so the decision is not based on a stale,
 * asynchronously-updated priority snapshot.
 *
 * @param candidate Task that wants to run
 * @param victim Task currently considered for preemption
 * @return True when victim should be preempted in favor of candidate Task
 */
private static boolean canPreempt(TaskWrapper candidate, TaskWrapper victim) {
  if (victim == null) {
    return false;
  }
  SignableVertexSpec candidateVertex = candidate.getTaskRunnerCallable().getFragmentInfo().getVertexSpec();
  SignableVertexSpec victimVertex = victim.getTaskRunnerCallable().getFragmentInfo().getVertexSpec();
  boolean sameVertex = candidateVertex.getHiveQueryId().equals(victimVertex.getHiveQueryId())
      && candidateVertex.getVertexIndex() == victimVertex.getVertexIndex();
  if (sameVertex) {
    return false;
  }
  if (candidate.isGuaranteed() && !victim.isGuaranteed()) {
    return true;
  }
  return candidate.isGuaranteed() == victim.isGuaranteed()
      && candidate.canFinishForPriority()
      && !victim.getTaskRunnerCallable().canFinish();
}
Example usage of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec in the Apache Hive project: class TestConverters, method testFragmentSpecToTaskSpec.
// Verifies that Converters.getTaskSpecfromProto faithfully translates a
// SignableVertexSpec proto into a Tez TaskSpec: names, attempt id, processor
// payload bytes, and the per-edge input/output specs.
@Test(timeout = 10000)
public void testFragmentSpecToTaskSpec() {
  // Distinct 4-byte payloads so the processor/input/output buffers are distinguishable.
  ByteBuffer processorBuffer = ByteBuffer.allocate(4);
  processorBuffer.putInt(0, 200);
  ByteBuffer inputBuffer = ByteBuffer.allocate(4);
  inputBuffer.putInt(0, 300);
  ByteBuffer outputBuffer = ByteBuffer.allocate(4);
  outputBuffer.putInt(0, 400);

  ApplicationId appId = ApplicationId.newInstance(1000, 100);
  TezDAGID dagId = TezDAGID.getInstance(appId, 300);
  TezVertexID vertexId = TezVertexID.getInstance(dagId, 400);
  TezTaskID taskId = TezTaskID.getInstance(vertexId, 500);
  TezTaskAttemptID attemptId = TezTaskAttemptID.getInstance(taskId, 600);

  QueryIdentifierProto queryIdentifierProto = QueryIdentifierProto.newBuilder()
      .setApplicationIdString(appId.toString())
      .setAppAttemptNumber(333)
      .setDagIndex(dagId.getId())
      .build();

  SignableVertexSpec vertexProto = SignableVertexSpec.newBuilder()
      .setQueryIdentifier(queryIdentifierProto)
      .setHiveQueryId("hiveQueryId")
      .setVertexIndex(vertexId.getId())
      .setDagName("dagName")
      .setVertexName("vertexName")
      .setProcessorDescriptor(EntityDescriptorProto.newBuilder()
          .setClassName("fakeProcessorName")
          .setUserPayload(UserPayloadProto.newBuilder()
              .setUserPayload(ByteString.copyFrom(processorBuffer))))
      .addInputSpecs(IOSpecProto.newBuilder()
          .setConnectedVertexName("sourceVertexName1")
          .setPhysicalEdgeCount(33)
          .setIoDescriptor(EntityDescriptorProto.newBuilder()
              .setClassName("input1ClassName")
              .setUserPayload(UserPayloadProto.newBuilder()
                  .setUserPayload(ByteString.copyFrom(inputBuffer)))))
      .addInputSpecs(IOSpecProto.newBuilder()
          .setConnectedVertexName("sourceVertexName2")
          .setPhysicalEdgeCount(44)
          .setIoDescriptor(EntityDescriptorProto.newBuilder()
              .setClassName("input1ClassName")
              .setUserPayload(UserPayloadProto.newBuilder()
                  .setUserPayload(ByteString.copyFrom(inputBuffer)))))
      .addOutputSpecs(IOSpecProto.newBuilder()
          .setConnectedVertexName("destVertexName1")
          .setPhysicalEdgeCount(55)
          .setIoDescriptor(EntityDescriptorProto.newBuilder()
              .setClassName("outputClassName")
              .setUserPayload(UserPayloadProto.newBuilder()
                  .setUserPayload(ByteString.copyFrom(outputBuffer)))))
      .addOutputSpecs(IOSpecProto.newBuilder()
          .setConnectedVertexName("destVertexName2")
          .setPhysicalEdgeCount(66)
          .setIoDescriptor(EntityDescriptorProto.newBuilder()
              .setClassName("outputClassName")
              .setUserPayload(UserPayloadProto.newBuilder()
                  .setUserPayload(ByteString.copyFrom(outputBuffer)))))
      .build();

  TaskSpec taskSpec = Converters.getTaskSpecfromProto(vertexProto, 0, 0, attemptId);

  assertEquals("dagName", taskSpec.getDAGName());
  assertEquals("vertexName", taskSpec.getVertexName());
  assertEquals(attemptId, taskSpec.getTaskAttemptID());
  assertEquals("fakeProcessorName", taskSpec.getProcessorDescriptor().getClassName());
  // Round-trip the processor payload bytes and compare to the source buffer.
  byte[] actualPayload = new byte[taskSpec.getProcessorDescriptor().getUserPayload().getPayload().remaining()];
  taskSpec.getProcessorDescriptor().getUserPayload().getPayload().get(actualPayload);
  assertArrayEquals(processorBuffer.array(), actualPayload);
  assertEquals(2, taskSpec.getInputs().size());
  assertEquals(2, taskSpec.getOutputs().size());
  verifyInputSpecAndProto(taskSpec.getInputs().get(0), vertexProto.getInputSpecs(0));
  verifyInputSpecAndProto(taskSpec.getInputs().get(1), vertexProto.getInputSpecs(1));
  verifyOutputSpecAndProto(taskSpec.getOutputs().get(0), vertexProto.getOutputSpecs(0));
  verifyOutputSpecAndProto(taskSpec.getOutputs().get(1), vertexProto.getOutputSpecs(1));
}
Example usage of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec in the Apache Hive project: class TestConverters, method testTaskSpecToFragmentSpec.
// Verifies that Converters.constructSignableVertexSpec faithfully translates a
// Tez TaskSpec into a SignableVertexSpec proto: names, query identifier,
// vertex index, processor payload, and the per-edge input/output specs.
@Test(timeout = 10000)
public void testTaskSpecToFragmentSpec() {
  ByteBuffer processorBuffer = ByteBuffer.allocate(4);
  processorBuffer.putInt(0, 200);
  ProcessorDescriptor processorDescriptor =
      ProcessorDescriptor.create("fakeProcessorName").setUserPayload(UserPayload.create(processorBuffer));

  // Two input edges sharing the same descriptor but distinct vertex names/edge counts.
  ByteBuffer inputBuffer = ByteBuffer.allocate(4);
  inputBuffer.putInt(0, 300);
  InputDescriptor inputDescriptor =
      InputDescriptor.create("input1ClassName").setUserPayload(UserPayload.create(inputBuffer));
  InputSpec inputSpec1 = new InputSpec("sourceVertexName1", inputDescriptor, 33);
  InputSpec inputSpec2 = new InputSpec("sourceVertexName2", inputDescriptor, 44);
  List<InputSpec> inputSpecList = Lists.newArrayList(inputSpec1, inputSpec2);

  // Two output edges, same pattern.
  ByteBuffer outputBuffer = ByteBuffer.allocate(4);
  outputBuffer.putInt(0, 400);
  OutputDescriptor outputDescriptor =
      OutputDescriptor.create("output1ClassName").setUserPayload(UserPayload.create(outputBuffer));
  OutputSpec outputSpec1 = new OutputSpec("destVertexName1", outputDescriptor, 55);
  OutputSpec outputSpec2 = new OutputSpec("destVertexName2", outputDescriptor, 66);
  List<OutputSpec> outputSpecList = Lists.newArrayList(outputSpec1, outputSpec2);

  ApplicationId appId = ApplicationId.newInstance(1000, 100);
  TezDAGID dagId = TezDAGID.getInstance(appId, 300);
  TezVertexID vertexId = TezVertexID.getInstance(dagId, 400);
  TezTaskID taskId = TezTaskID.getInstance(vertexId, 500);
  TezTaskAttemptID attemptId = TezTaskAttemptID.getInstance(taskId, 600);

  TaskSpec taskSpec = new TaskSpec(attemptId, "dagName", "vertexName", 10,
      processorDescriptor, inputSpecList, outputSpecList, null);
  QueryIdentifierProto queryIdentifierProto = QueryIdentifierProto.newBuilder()
      .setApplicationIdString(appId.toString())
      .setAppAttemptNumber(333)
      .setDagIndex(300)
      .build();

  SignableVertexSpec vertexProto =
      Converters.constructSignableVertexSpec(taskSpec, queryIdentifierProto, "", "", "hiveQueryId").build();

  assertEquals("dagName", vertexProto.getDagName());
  assertEquals("vertexName", vertexProto.getVertexName());
  assertEquals("hiveQueryId", vertexProto.getHiveQueryId());
  assertEquals(appId.toString(), vertexProto.getQueryIdentifier().getApplicationIdString());
  assertEquals(dagId.getId(), vertexProto.getQueryIdentifier().getDagIndex());
  assertEquals(333, vertexProto.getQueryIdentifier().getAppAttemptNumber());
  assertEquals(vertexId.getId(), vertexProto.getVertexIndex());
  assertEquals(processorDescriptor.getClassName(), vertexProto.getProcessorDescriptor().getClassName());
  assertEquals(processorDescriptor.getUserPayload().getPayload(),
      vertexProto.getProcessorDescriptor().getUserPayload().getUserPayload().asReadOnlyByteBuffer());
  assertEquals(2, vertexProto.getInputSpecsCount());
  assertEquals(2, vertexProto.getOutputSpecsCount());
  verifyInputSpecAndProto(inputSpec1, vertexProto.getInputSpecs(0));
  verifyInputSpecAndProto(inputSpec2, vertexProto.getInputSpecs(1));
  verifyOutputSpecAndProto(outputSpec1, vertexProto.getOutputSpecs(0));
  verifyOutputSpecAndProto(outputSpec2, vertexProto.getOutputSpecs(1));
}
Aggregations