Use of org.apache.hyracks.ipc.exceptions.IPCException in project asterixdb by apache.
The class ClientInterfaceIPCI, method deliverIncomingMessage.
@Override
public void deliverIncomingMessage(IIPCHandle handle, long mid, long rmid, Object payload, Exception exception) {
    HyracksClientInterfaceFunctions.Function fn = (HyracksClientInterfaceFunctions.Function) payload;
    switch (fn.getFunctionId()) {
        case GET_CLUSTER_CONTROLLER_INFO:
            try {
                handle.send(mid, ccs.getClusterControllerInfo(), null);
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending response to GET_CLUSTER_CONTROLLER_INFO request", e);
            }
            break;
        case GET_JOB_STATUS:
            HyracksClientInterfaceFunctions.GetJobStatusFunction gjsf =
                    (HyracksClientInterfaceFunctions.GetJobStatusFunction) fn;
            ccs.getWorkQueue().schedule(
                    new GetJobStatusWork(ccs.getJobManager(), gjsf.getJobId(), new IPCResponder<>(handle, mid)));
            break;
        case GET_JOB_INFO:
            HyracksClientInterfaceFunctions.GetJobInfoFunction gjif =
                    (HyracksClientInterfaceFunctions.GetJobInfoFunction) fn;
            ccs.getWorkQueue().schedule(
                    new GetJobInfoWork(ccs.getJobManager(), gjif.getJobId(), new IPCResponder<JobInfo>(handle, mid)));
            break;
        case DISTRIBUTE_JOB:
            HyracksClientInterfaceFunctions.DistributeJobFunction djf =
                    (HyracksClientInterfaceFunctions.DistributeJobFunction) fn;
            ccs.getWorkQueue().schedule(new DistributeJobWork(ccs, djf.getACGGFBytes(), jobIdFactory.create(),
                    new IPCResponder<JobId>(handle, mid)));
            break;
        case DESTROY_JOB:
            HyracksClientInterfaceFunctions.DestroyJobFunction dsjf =
                    (HyracksClientInterfaceFunctions.DestroyJobFunction) fn;
            ccs.getWorkQueue().schedule(
                    new DestroyJobWork(ccs, dsjf.getJobId(), new IPCResponder<JobId>(handle, mid)));
            break;
        case CANCEL_JOB:
            HyracksClientInterfaceFunctions.CancelJobFunction cjf =
                    (HyracksClientInterfaceFunctions.CancelJobFunction) fn;
            ccs.getWorkQueue().schedule(
                    new CancelJobWork(ccs.getJobManager(), cjf.getJobId(), new IPCResponder<Void>(handle, mid)));
            break;
        case START_JOB:
            HyracksClientInterfaceFunctions.StartJobFunction sjf =
                    (HyracksClientInterfaceFunctions.StartJobFunction) fn;
            JobId jobId = sjf.getJobId();
            byte[] acggfBytes = null;
            boolean predistributed = false;
            if (jobId == null) {
                // The job is new
                jobId = jobIdFactory.create();
                acggfBytes = sjf.getACGGFBytes();
            } else {
                // The job has been predistributed. We don't need to send an ActivityClusterGraph
                predistributed = true;
            }
            ccs.getWorkQueue().schedule(new JobStartWork(ccs, sjf.getDeploymentId(), acggfBytes, sjf.getJobFlags(),
                    jobId, new IPCResponder<JobId>(handle, mid), predistributed));
            break;
        case GET_DATASET_DIRECTORY_SERIVICE_INFO:
            ccs.getWorkQueue().schedule(
                    new GetDatasetDirectoryServiceInfoWork(ccs, new IPCResponder<NetworkAddress>(handle, mid)));
            break;
        case GET_DATASET_RESULT_STATUS:
            HyracksClientInterfaceFunctions.GetDatasetResultStatusFunction gdrsf =
                    (HyracksClientInterfaceFunctions.GetDatasetResultStatusFunction) fn;
            ccs.getWorkQueue().schedule(new GetResultStatusWork(ccs, gdrsf.getJobId(), gdrsf.getResultSetId(),
                    new IPCResponder<Status>(handle, mid)));
            break;
        case GET_DATASET_RESULT_LOCATIONS:
            HyracksClientInterfaceFunctions.GetDatasetResultLocationsFunction gdrlf =
                    (HyracksClientInterfaceFunctions.GetDatasetResultLocationsFunction) fn;
            ccs.getWorkQueue().schedule(new GetResultPartitionLocationsWork(ccs, gdrlf.getJobId(),
                    gdrlf.getResultSetId(), gdrlf.getKnownRecords(), new IPCResponder<>(handle, mid)));
            break;
        case WAIT_FOR_COMPLETION:
            HyracksClientInterfaceFunctions.WaitForCompletionFunction wfcf =
                    (HyracksClientInterfaceFunctions.WaitForCompletionFunction) fn;
            ccs.getWorkQueue().schedule(
                    new WaitForJobCompletionWork(ccs, wfcf.getJobId(), new IPCResponder<>(handle, mid)));
            break;
        case GET_NODE_CONTROLLERS_INFO:
            ccs.getWorkQueue().schedule(
                    new GetNodeControllersInfoWork(ccs.getNodeManager(), new IPCResponder<>(handle, mid)));
            break;
        case GET_CLUSTER_TOPOLOGY:
            try {
                handle.send(mid, ccs.getCCContext().getClusterTopology(), null);
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending response to GET_CLUSTER_TOPOLOGY request", e);
            }
            break;
        case CLI_DEPLOY_BINARY:
            HyracksClientInterfaceFunctions.CliDeployBinaryFunction dbf =
                    (HyracksClientInterfaceFunctions.CliDeployBinaryFunction) fn;
            ccs.getWorkQueue().schedule(new CliDeployBinaryWork(ccs, dbf.getBinaryURLs(), dbf.getDeploymentId(),
                    new IPCResponder<>(handle, mid)));
            break;
        case CLI_UNDEPLOY_BINARY:
            HyracksClientInterfaceFunctions.CliUnDeployBinaryFunction udbf =
                    (HyracksClientInterfaceFunctions.CliUnDeployBinaryFunction) fn;
            ccs.getWorkQueue().schedule(
                    new CliUnDeployBinaryWork(ccs, udbf.getDeploymentId(), new IPCResponder<>(handle, mid)));
            break;
        case CLUSTER_SHUTDOWN:
            HyracksClientInterfaceFunctions.ClusterShutdownFunction csf =
                    (HyracksClientInterfaceFunctions.ClusterShutdownFunction) fn;
            ccs.getWorkQueue().schedule(
                    new ClusterShutdownWork(ccs, csf.isTerminateNCService(), new IPCResponder<>(handle, mid)));
            break;
        case GET_NODE_DETAILS_JSON:
            HyracksClientInterfaceFunctions.GetNodeDetailsJSONFunction gndjf =
                    (HyracksClientInterfaceFunctions.GetNodeDetailsJSONFunction) fn;
            ccs.getWorkQueue().schedule(new GetNodeDetailsJSONWork(ccs.getNodeManager(), ccs.getCCConfig(),
                    gndjf.getNodeId(), gndjf.isIncludeStats(), gndjf.isIncludeConfig(),
                    new IPCResponder<>(handle, mid)));
            break;
        case THREAD_DUMP:
            HyracksClientInterfaceFunctions.ThreadDumpFunction tdf =
                    (HyracksClientInterfaceFunctions.ThreadDumpFunction) fn;
            ccs.getWorkQueue().schedule(
                    new GetThreadDumpWork(ccs, tdf.getNode(), new IPCResponder<String>(handle, mid)));
            break;
        default:
            try {
                handle.send(mid, null, new IllegalArgumentException("Unknown function " + fn.getFunctionId()));
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending Unknown function response", e);
            }
    }
}
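Every branch of this dispatcher answers on the same IIPCHandle using the message id (mid) of the incoming request: cheap lookups reply inline with handle.send, while everything else is scheduled on the CC work queue together with an IPCResponder that delivers the result once the work completes. Below is a minimal sketch of what such a responder amounts to, written against only the handle.send(mid, payload, exception) contract visible above; the class name SimpleResponder and its setValue/setException methods mirror how the callback is used in ClusterShutdownWork further down, but this is not the actual IPCResponder implementation, and the import path for IIPCHandle is assumed from the hyracks-ipc module layout.

import org.apache.hyracks.ipc.api.IIPCHandle;
import org.apache.hyracks.ipc.exceptions.IPCException;

// Illustrative only: remember the handle and message id of a request, then reply
// with the same mid once the deferred work finishes or fails.
class SimpleResponder<T> {
    private final IIPCHandle handle;
    private final long mid;

    SimpleResponder(IIPCHandle handle, long mid) {
        this.handle = handle;
        this.mid = mid;
    }

    // Success path: ship the result back to the waiting client.
    void setValue(T result) {
        reply(result, null);
    }

    // Failure path: ship the exception back instead of a result.
    void setException(Exception e) {
        reply(null, e);
    }

    private void reply(Object payload, Exception exception) {
        try {
            handle.send(mid, payload, exception);
        } catch (IPCException e) {
            // Nothing more can be done at this point; the dispatcher above only logs here too.
            e.printStackTrace();
        }
    }
}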
Use of org.apache.hyracks.ipc.exceptions.IPCException in project asterixdb by apache.
The class IPCTest, method createServerIPCSystem.
private IPCSystem createServerIPCSystem() throws IOException {
    final Executor executor = Executors.newCachedThreadPool();
    IIPCI ipci = new IIPCI() {
        @Override
        public void deliverIncomingMessage(final IIPCHandle handle, final long mid, long rmid,
                final Object payload, Exception exception) {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    Object result = null;
                    Exception exception = null;
                    try {
                        Integer i = (Integer) payload;
                        result = i.intValue() * 2;
                    } catch (Exception e) {
                        exception = e;
                    }
                    try {
                        handle.send(mid, result, exception);
                    } catch (IPCException e) {
                        e.printStackTrace();
                    }
                }
            });
        }
    };
    return new IPCSystem(new InetSocketAddress("127.0.0.1", 0), ipci,
            new JavaSerializationBasedPayloadSerializerDeserializer());
}
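The handler above doubles whatever Integer it receives and sends the result (or the exception) back under the request's mid. A client exercising it end to end might look like the sketch below; server.start(), getSocketAddress(), getHandle(), and the RPCInterface helper are assumptions about the client-side API of the IPC layer, not shown in the snippet itself, and may not match the actual signatures exactly.

// Hedged sketch only: the client-side calls (start, getSocketAddress, getHandle,
// RPCInterface.call) are assumed, not taken from the snippet above.
public void exerciseDoublingServer() throws Exception {
    IPCSystem server = createServerIPCSystem();
    server.start();

    // A client IPCSystem whose incoming-message handler is the RPC helper.
    RPCInterface rpci = new RPCInterface();
    IPCSystem client = new IPCSystem(new InetSocketAddress("127.0.0.1", 0), rpci,
            new JavaSerializationBasedPayloadSerializerDeserializer());
    client.start();

    // Connect to whatever ephemeral port the server actually bound to.
    IIPCHandle handle = client.getHandle(server.getSocketAddress());

    // The server-side IIPCI should answer with twice the request value under the same mid.
    Object answer = rpci.call(handle, Integer.valueOf(21));
    if (!Integer.valueOf(42).equals(answer)) {
        throw new AssertionError("unexpected response: " + answer);
    }
}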
Use of org.apache.hyracks.ipc.exceptions.IPCException in project asterixdb by apache.
The class ClusterShutdownWork, method doRun.
@Override
public void doRun() {
    try {
        if (ccs.getShutdownRun() != null) {
            throw new IPCException("Shutdown already in progress");
        }
        INodeManager nodeManager = ccs.getNodeManager();
        Collection<String> nodeIds = nodeManager.getAllNodeIds();
        /**
         * set up our listener for the node ACKs
         */
        final ShutdownRun shutdownStatus = new ShutdownRun(nodeIds);
        // set up the CC to listen for it
        ccs.setShutdownRun(shutdownStatus);
        /**
         * Shutdown all the nodes...
         */
        nodeManager.apply(this::shutdownNode);
        ccs.getExecutor().execute(new Runnable() {
            @Override
            public void run() {
                try {
                    /*
                     * wait for all our acks
                     */
                    LOGGER.info("Waiting for NCs to shutdown...");
                    boolean cleanShutdown = shutdownStatus.waitForCompletion();
                    if (!cleanShutdown) {
                        /*
                         * best effort - just exit, user will have to kill misbehaving NCs
                         */
                        LOGGER.severe("Clean shutdown of NCs timed out- giving up; unresponsive nodes: "
                                + shutdownStatus.getRemainingNodes());
                    }
                    callback.setValue(cleanShutdown);
                    ccs.stop(terminateNCService);
                    LOGGER.info("JVM Exiting.. Bye!");
                    Runtime rt = Runtime.getRuntime();
                    rt.exit(cleanShutdown ? 0 : 1);
                } catch (Exception e) {
                    callback.setException(e);
                }
            }
        });
    } catch (Exception e) {
        callback.setException(e);
    }
}
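This work item refuses to start a second shutdown (the IPCException at the top), registers a ShutdownRun keyed by the node ids it expects acknowledgements from, asks every NC to shut down, and then waits off-thread before deciding the process exit code. A plausible shape for that ack tracker is sketched below; it is not the real ShutdownRun class, and the notifyShutdown method name and the timeout value are assumptions made for the example.

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

// Illustrative sketch of the ack-tracking pattern used above, not the actual ShutdownRun.
class ShutdownAckTracker {
    private static final long TIMEOUT_MS = 10000; // assumed timeout for the example
    private final Set<String> remainingNodes;

    ShutdownAckTracker(Collection<String> nodeIds) {
        remainingNodes = new HashSet<>(nodeIds);
    }

    // Called when an NC acknowledges the shutdown request.
    public synchronized void notifyShutdown(String nodeId) {
        remainingNodes.remove(nodeId);
        if (remainingNodes.isEmpty()) {
            notifyAll();
        }
    }

    // Returns true only if every node acknowledged before the timeout elapsed.
    public synchronized boolean waitForCompletion() throws InterruptedException {
        long deadline = System.currentTimeMillis() + TIMEOUT_MS;
        while (!remainingNodes.isEmpty()) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0) {
                return false;
            }
            wait(remaining);
        }
        return true;
    }

    // Nodes that never acked; the caller logs these before giving up.
    public synchronized Set<String> getRemainingNodes() {
        return new HashSet<>(remainingNodes);
    }
}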