Use of org.apache.hyracks.control.cc.work.GetResultStatusWork in project asterixdb by apache.
The example below is the deliverIncomingMessage method of the class ClientInterfaceIPCI; the GET_DATASET_RESULT_STATUS case is where GetResultStatusWork is constructed and scheduled.
@Override
public void deliverIncomingMessage(IIPCHandle handle, long mid, long rmid, Object payload, Exception exception) {
    HyracksClientInterfaceFunctions.Function fn = (HyracksClientInterfaceFunctions.Function) payload;
    switch (fn.getFunctionId()) {
        case GET_CLUSTER_CONTROLLER_INFO:
            try {
                handle.send(mid, ccs.getClusterControllerInfo(), null);
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending response to GET_CLUSTER_CONTROLLER_INFO request", e);
            }
            break;
        case GET_JOB_STATUS:
            HyracksClientInterfaceFunctions.GetJobStatusFunction gjsf =
                    (HyracksClientInterfaceFunctions.GetJobStatusFunction) fn;
            ccs.getWorkQueue().schedule(
                    new GetJobStatusWork(ccs.getJobManager(), gjsf.getJobId(), new IPCResponder<>(handle, mid)));
            break;
        case GET_JOB_INFO:
            HyracksClientInterfaceFunctions.GetJobInfoFunction gjif =
                    (HyracksClientInterfaceFunctions.GetJobInfoFunction) fn;
            ccs.getWorkQueue().schedule(
                    new GetJobInfoWork(ccs.getJobManager(), gjif.getJobId(), new IPCResponder<JobInfo>(handle, mid)));
            break;
        case DISTRIBUTE_JOB:
            HyracksClientInterfaceFunctions.DistributeJobFunction djf =
                    (HyracksClientInterfaceFunctions.DistributeJobFunction) fn;
            ccs.getWorkQueue().schedule(new DistributeJobWork(ccs, djf.getACGGFBytes(), jobIdFactory.create(),
                    new IPCResponder<JobId>(handle, mid)));
            break;
        case DESTROY_JOB:
            HyracksClientInterfaceFunctions.DestroyJobFunction dsjf =
                    (HyracksClientInterfaceFunctions.DestroyJobFunction) fn;
            ccs.getWorkQueue().schedule(
                    new DestroyJobWork(ccs, dsjf.getJobId(), new IPCResponder<JobId>(handle, mid)));
            break;
        case CANCEL_JOB:
            HyracksClientInterfaceFunctions.CancelJobFunction cjf =
                    (HyracksClientInterfaceFunctions.CancelJobFunction) fn;
            ccs.getWorkQueue().schedule(
                    new CancelJobWork(ccs.getJobManager(), cjf.getJobId(), new IPCResponder<Void>(handle, mid)));
            break;
        case START_JOB:
            HyracksClientInterfaceFunctions.StartJobFunction sjf =
                    (HyracksClientInterfaceFunctions.StartJobFunction) fn;
            JobId jobId = sjf.getJobId();
            byte[] acggfBytes = null;
            boolean predistributed = false;
            if (jobId == null) {
                // The job is new
                jobId = jobIdFactory.create();
                acggfBytes = sjf.getACGGFBytes();
            } else {
                // The job has been predistributed. We don't need to send an ActivityClusterGraph
                predistributed = true;
            }
            ccs.getWorkQueue().schedule(new JobStartWork(ccs, sjf.getDeploymentId(), acggfBytes, sjf.getJobFlags(),
                    jobId, new IPCResponder<JobId>(handle, mid), predistributed));
            break;
        case GET_DATASET_DIRECTORY_SERIVICE_INFO:
            ccs.getWorkQueue().schedule(
                    new GetDatasetDirectoryServiceInfoWork(ccs, new IPCResponder<NetworkAddress>(handle, mid)));
            break;
        case GET_DATASET_RESULT_STATUS:
            HyracksClientInterfaceFunctions.GetDatasetResultStatusFunction gdrsf =
                    (HyracksClientInterfaceFunctions.GetDatasetResultStatusFunction) fn;
            ccs.getWorkQueue().schedule(new GetResultStatusWork(ccs, gdrsf.getJobId(), gdrsf.getResultSetId(),
                    new IPCResponder<Status>(handle, mid)));
            break;
        case GET_DATASET_RESULT_LOCATIONS:
            HyracksClientInterfaceFunctions.GetDatasetResultLocationsFunction gdrlf =
                    (HyracksClientInterfaceFunctions.GetDatasetResultLocationsFunction) fn;
            ccs.getWorkQueue().schedule(new GetResultPartitionLocationsWork(ccs, gdrlf.getJobId(),
                    gdrlf.getResultSetId(), gdrlf.getKnownRecords(), new IPCResponder<>(handle, mid)));
            break;
        case WAIT_FOR_COMPLETION:
            HyracksClientInterfaceFunctions.WaitForCompletionFunction wfcf =
                    (HyracksClientInterfaceFunctions.WaitForCompletionFunction) fn;
            ccs.getWorkQueue().schedule(
                    new WaitForJobCompletionWork(ccs, wfcf.getJobId(), new IPCResponder<>(handle, mid)));
            break;
        case GET_NODE_CONTROLLERS_INFO:
            ccs.getWorkQueue().schedule(
                    new GetNodeControllersInfoWork(ccs.getNodeManager(), new IPCResponder<>(handle, mid)));
            break;
        case GET_CLUSTER_TOPOLOGY:
            try {
                handle.send(mid, ccs.getCCContext().getClusterTopology(), null);
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending response to GET_CLUSTER_TOPOLOGY request", e);
            }
            break;
        case CLI_DEPLOY_BINARY:
            HyracksClientInterfaceFunctions.CliDeployBinaryFunction dbf =
                    (HyracksClientInterfaceFunctions.CliDeployBinaryFunction) fn;
            ccs.getWorkQueue().schedule(new CliDeployBinaryWork(ccs, dbf.getBinaryURLs(), dbf.getDeploymentId(),
                    new IPCResponder<>(handle, mid)));
            break;
        case CLI_UNDEPLOY_BINARY:
            HyracksClientInterfaceFunctions.CliUnDeployBinaryFunction udbf =
                    (HyracksClientInterfaceFunctions.CliUnDeployBinaryFunction) fn;
            ccs.getWorkQueue().schedule(
                    new CliUnDeployBinaryWork(ccs, udbf.getDeploymentId(), new IPCResponder<>(handle, mid)));
            break;
        case CLUSTER_SHUTDOWN:
            HyracksClientInterfaceFunctions.ClusterShutdownFunction csf =
                    (HyracksClientInterfaceFunctions.ClusterShutdownFunction) fn;
            ccs.getWorkQueue().schedule(
                    new ClusterShutdownWork(ccs, csf.isTerminateNCService(), new IPCResponder<>(handle, mid)));
            break;
        case GET_NODE_DETAILS_JSON:
            HyracksClientInterfaceFunctions.GetNodeDetailsJSONFunction gndjf =
                    (HyracksClientInterfaceFunctions.GetNodeDetailsJSONFunction) fn;
            ccs.getWorkQueue().schedule(new GetNodeDetailsJSONWork(ccs.getNodeManager(), ccs.getCCConfig(),
                    gndjf.getNodeId(), gndjf.isIncludeStats(), gndjf.isIncludeConfig(),
                    new IPCResponder<>(handle, mid)));
            break;
        case THREAD_DUMP:
            HyracksClientInterfaceFunctions.ThreadDumpFunction tdf =
                    (HyracksClientInterfaceFunctions.ThreadDumpFunction) fn;
            ccs.getWorkQueue().schedule(
                    new GetThreadDumpWork(ccs, tdf.getNode(), new IPCResponder<String>(handle, mid)));
            break;
        default:
            try {
                handle.send(mid, null, new IllegalArgumentException("Unknown function " + fn.getFunctionId()));
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending Unknown function response", e);
            }
    }
}
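
Every non-trivial request follows the same pattern: the payload is cast to the matching HyracksClientInterfaceFunctions function object, the reply channel (handle, mid) is wrapped in an IPCResponder, and a work object such as GetResultStatusWork is scheduled on the cluster controller's work queue rather than executed on the IPC delivery thread. The sketch below illustrates that shape with simplified, hypothetical stand-in types (FunctionId, Responder, ResultStatusWork, and a plain BlockingQueue standing in for the work queue); it is not the real Hyracks API.

// Minimal, self-contained sketch of the dispatch pattern above, using
// hypothetical stand-in types rather than the real Hyracks classes.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DispatchSketch {

    enum FunctionId { GET_DATASET_RESULT_STATUS, UNKNOWN }

    /** Stand-in for IPCResponder: carries the reply back to the caller. */
    interface Responder<T> {
        void setValue(T value);
        void error(Exception e);
    }

    /** Stand-in for a scheduled work item such as GetResultStatusWork. */
    static final class ResultStatusWork implements Runnable {
        private final long jobId;
        private final Responder<String> responder;

        ResultStatusWork(long jobId, Responder<String> responder) {
            this.jobId = jobId;
            this.responder = responder;
        }

        @Override
        public void run() {
            // The real work class would consult the dataset directory service;
            // here we just report a placeholder status.
            responder.setValue("SUCCESS (job " + jobId + ")");
        }
    }

    private final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();

    /** Mirrors deliverIncomingMessage: decode the request, then schedule instead of executing inline. */
    void deliver(FunctionId fn, long jobId, Responder<String> responder) {
        switch (fn) {
            case GET_DATASET_RESULT_STATUS:
                workQueue.add(new ResultStatusWork(jobId, responder));
                break;
            default:
                responder.error(new IllegalArgumentException("Unknown function " + fn));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        DispatchSketch sketch = new DispatchSketch();
        sketch.deliver(FunctionId.GET_DATASET_RESULT_STATUS, 42L, new Responder<String>() {
            @Override public void setValue(String value) { System.out.println("reply: " + value); }
            @Override public void error(Exception e) { e.printStackTrace(); }
        });
        // Drain one item, standing in for the cluster controller's work-queue thread.
        sketch.workQueue.take().run();
    }
}

Keeping the handler itself free of blocking calls means a slow request cannot stall delivery of later IPC messages; the scheduled work object completes the exchange by answering through the responder it was given.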