Use of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto in project Hive by Apache.
From the class Converters, method getTaskSpecfromProto.
public static TaskSpec getTaskSpecfromProto(SignableVertexSpec vectorProto, int fragmentNum,
    int attemptNum, TezTaskAttemptID attemptId) {
  // Reuse the supplied attempt id if there is one; otherwise derive it from the query identifier.
  TezTaskAttemptID taskAttemptID = attemptId != null ? attemptId
      : createTaskAttemptId(vectorProto.getQueryIdentifier(), vectorProto.getVertexIndex(),
          fragmentNum, attemptNum);
  ProcessorDescriptor processorDescriptor = null;
  if (vectorProto.hasProcessorDescriptor()) {
    processorDescriptor = convertProcessorDescriptorFromProto(vectorProto.getProcessorDescriptor());
  }
  // Convert the input, output and grouped-input specs from their protobuf form.
  List<InputSpec> inputSpecList = new ArrayList<InputSpec>(vectorProto.getInputSpecsCount());
  if (vectorProto.getInputSpecsCount() > 0) {
    for (IOSpecProto inputSpecProto : vectorProto.getInputSpecsList()) {
      inputSpecList.add(getInputSpecFromProto(inputSpecProto));
    }
  }
  List<OutputSpec> outputSpecList = new ArrayList<OutputSpec>(vectorProto.getOutputSpecsCount());
  if (vectorProto.getOutputSpecsCount() > 0) {
    for (IOSpecProto outputSpecProto : vectorProto.getOutputSpecsList()) {
      outputSpecList.add(getOutputSpecFromProto(outputSpecProto));
    }
  }
  List<GroupInputSpec> groupInputSpecs = new ArrayList<GroupInputSpec>(vectorProto.getGroupedInputSpecsCount());
  if (vectorProto.getGroupedInputSpecsCount() > 0) {
    for (GroupInputSpecProto groupInputSpecProto : vectorProto.getGroupedInputSpecsList()) {
      groupInputSpecs.add(getGroupInputSpecFromProto(groupInputSpecProto));
    }
  }
  TaskSpec taskSpec = new TaskSpec(taskAttemptID, vectorProto.getDagName(), vectorProto.getVertexName(),
      vectorProto.getVertexParallelism(), processorDescriptor, inputSpecList, outputSpecList,
      groupInputSpecs);
  return taskSpec;
}
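Below is a minimal, hypothetical sketch of how this converter might be driven. The SignableVertexSpec is assembled through the protobuf-generated builder; the builder method names are inferred from the getters used above, and the field values are placeholders rather than real identifiers, so treat the snippet as illustrative and not as the actual call site in the daemon.

// Hypothetical sketch; builder method names are inferred from the protobuf getters above
// and the values are placeholders, not real query/DAG identifiers.
SignableVertexSpec vertexSpec = SignableVertexSpec.newBuilder()
    .setQueryIdentifier(QueryIdentifierProto.newBuilder()
        .setApplicationIdString("application_1700000000000_0001") // placeholder YARN app id
        .setAppAttemptNumber(1))
    .setDagName("example_dag")       // placeholder DAG name
    .setVertexName("Map 1")          // placeholder vertex name
    .setVertexIndex(0)
    .setVertexParallelism(4)
    .build();

// Passing null for the attempt id lets the converter derive a TezTaskAttemptID
// from the query identifier, fragment number and attempt number.
TaskSpec taskSpec = Converters.getTaskSpecfromProto(vertexSpec, 0, 0, null);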
Use of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto in project Hive by Apache.
From the class QueryFragmentInfo, method registerForFinishableStateUpdates.
/**
 * @param handler the handler to notify when the finishable state of this fragment changes
 * @param lastFinishableState the finishable state last observed by the caller
 * @return true if the current state is the same as the lastFinishableState. false if the state has already changed.
 */
public boolean registerForFinishableStateUpdates(FinishableStateUpdateHandler handler, boolean lastFinishableState) {
  List<String> sourcesOfInterest = new LinkedList<>();
  List<IOSpecProto> inputSpecList = vertexSpec.getInputSpecsList();
  if (inputSpecList != null && !inputSpecList.isEmpty()) {
    for (IOSpecProto inputSpec : inputSpecList) {
      if (LlapTezUtils.isSourceOfInterest(inputSpec.getIoDescriptor().getClassName())) {
        sourcesOfInterest.add(inputSpec.getConnectedVertexName());
      }
    }
  }
  return queryInfo.registerForFinishableStateUpdates(handler, sourcesOfInterest, this, lastFinishableState);
}
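As a rough usage sketch: a scheduler typically snapshots the finishable state it last acted on, registers for updates, and treats a false return as "the state already changed, re-read it instead of waiting for a callback". The callback name on FinishableStateUpdateHandler and the surrounding wiring below are assumptions made for illustration, not taken from the snippet above.

// Hypothetical usage; the single-callback shape of FinishableStateUpdateHandler is assumed.
static void watchFinishableState(QueryFragmentInfo fragmentInfo, boolean lastKnownFinishableState) {
  FinishableStateUpdateHandler handler = new FinishableStateUpdateHandler() {
    @Override
    public void finishableStateUpdated(boolean finishableState) { // assumed callback signature
      // React to the change, e.g. move the fragment between wait queues (illustrative only).
    }
  };
  if (!fragmentInfo.registerForFinishableStateUpdates(handler, lastKnownFinishableState)) {
    // The state changed before registration completed; re-read it rather than wait for a callback.
  }
}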
Use of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto in project Hive by Apache.
From the class QueryFragmentInfo, method canFinish.
// Hide this so it doesn't look like a simple property.
private boolean canFinish() {
  List<IOSpecProto> inputSpecList = vertexSpec.getInputSpecsList();
  boolean canFinish = true;
  if (inputSpecList != null && !inputSpecList.isEmpty()) {
    for (IOSpecProto inputSpec : inputSpecList) {
      if (LlapTezUtils.isSourceOfInterest(inputSpec.getIoDescriptor().getClassName())) {
        // Lookup the state in the map.
        LlapDaemonProtocolProtos.SourceStateProto state =
            queryInfo.getSourceStateMap().get(inputSpec.getConnectedVertexName());
        if (state != null && state == LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED) {
          continue;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Cannot finish due to source: " + inputSpec.getConnectedVertexName());
          }
          canFinish = false;
          break;
        }
      }
    }
  }
  return canFinish;
}
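The map consulted above is keyed by upstream vertex name and holds the last source state reported to the daemon. A small illustrative sketch of that lookup follows; the map, the vertex name, and the use of S_RUNNING as the "not yet finished" state are stand-ins chosen for the example.

// Illustrative only: simulate the per-query source-state map the daemon maintains.
Map<String, LlapDaemonProtocolProtos.SourceStateProto> sourceStateMap = new HashMap<>();
sourceStateMap.put("Map 1", LlapDaemonProtocolProtos.SourceStateProto.S_RUNNING); // assumed enum value

LlapDaemonProtocolProtos.SourceStateProto state = sourceStateMap.get("Map 1");
boolean upstreamDone = state == LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED;
// upstreamDone is false here, so a fragment reading from "Map 1" is not yet finishable.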
Use of org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto in project Hive by Apache.
From the class ContainerRunnerImpl, method stringifySubmitRequest.
public static String stringifySubmitRequest(SubmitWorkRequestProto request, SignableVertexSpec vertex) {
  StringBuilder sb = new StringBuilder();
  sb.append("am_details=").append(request.getAmHost()).append(":").append(request.getAmPort());
  sb.append(", taskInfo=").append(" fragment ").append(request.getFragmentNumber())
      .append(" attempt ").append(request.getAttemptNumber());
  sb.append(", user=").append(vertex.getUser());
  sb.append(", queryId=").append(vertex.getHiveQueryId());
  sb.append(", appIdString=").append(vertex.getQueryIdentifier().getApplicationIdString());
  sb.append(", appAttemptNum=").append(vertex.getQueryIdentifier().getAppAttemptNumber());
  sb.append(", containerIdString=").append(request.getContainerIdString());
  sb.append(", dagName=").append(vertex.getDagName());
  sb.append(", vertexName=").append(vertex.getVertexName());
  sb.append(", processor=").append(vertex.getProcessorDescriptor().getClassName());
  sb.append(", numInputs=").append(vertex.getInputSpecsCount());
  sb.append(", numOutputs=").append(vertex.getOutputSpecsCount());
  sb.append(", numGroupedInputs=").append(vertex.getGroupedInputSpecsCount());
  sb.append(", Inputs={");
  if (vertex.getInputSpecsCount() > 0) {
    for (IOSpecProto ioSpec : vertex.getInputSpecsList()) {
      sb.append("{").append(ioSpec.getConnectedVertexName()).append(",")
          .append(ioSpec.getIoDescriptor().getClassName()).append(",")
          .append(ioSpec.getPhysicalEdgeCount()).append("}");
    }
  }
  sb.append("}");
  sb.append(", Outputs={");
  if (vertex.getOutputSpecsCount() > 0) {
    for (IOSpecProto ioSpec : vertex.getOutputSpecsList()) {
      sb.append("{").append(ioSpec.getConnectedVertexName()).append(",")
          .append(ioSpec.getIoDescriptor().getClassName()).append(",")
          .append(ioSpec.getPhysicalEdgeCount()).append("}");
    }
  }
  sb.append("}");
  sb.append(", GroupedInputs={");
  if (vertex.getGroupedInputSpecsCount() > 0) {
    for (GroupInputSpecProto group : vertex.getGroupedInputSpecsList()) {
      sb.append("{").append("groupName=").append(group.getGroupName()).append(", elements=")
          .append(group.getGroupVerticesList()).append("}");
      sb.append(group.getGroupVerticesList());
    }
  }
  sb.append("}");
  FragmentRuntimeInfo fragmentRuntimeInfo = request.getFragmentRuntimeInfo();
  sb.append(", FragmentRuntimeInfo={");
  sb.append("taskCount=").append(fragmentRuntimeInfo.getNumSelfAndUpstreamTasks());
  sb.append(", completedTaskCount=").append(fragmentRuntimeInfo.getNumSelfAndUpstreamCompletedTasks());
  sb.append(", dagStartTime=").append(fragmentRuntimeInfo.getDagStartTime());
  sb.append(", firstAttemptStartTime=").append(fragmentRuntimeInfo.getFirstAttemptStartTime());
  sb.append(", currentAttemptStartTime=").append(fragmentRuntimeInfo.getCurrentAttemptStartTime());
  sb.append("}");
  return sb.toString();
}
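A hypothetical call site, just to show the intent: the string is only useful for diagnostics, so it would typically be built behind a log-level guard. The request, vertex and LOG variables are assumed to be in scope, and how the SignableVertexSpec is obtained from the request is elided.

// Hypothetical logging call; 'request', 'vertex' and 'LOG' are assumed to be in scope.
if (LOG.isInfoEnabled()) {
  LOG.info("Submitting fragment: " + stringifySubmitRequest(request, vertex));
}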