
Example 1 with IPCException

Use of org.apache.hyracks.ipc.exceptions.IPCException in project asterixdb by apache.

From the class ClientInterfaceIPCI, method deliverIncomingMessage:

@Override
public void deliverIncomingMessage(IIPCHandle handle, long mid, long rmid, Object payload, Exception exception) {
    HyracksClientInterfaceFunctions.Function fn = (HyracksClientInterfaceFunctions.Function) payload;
    switch(fn.getFunctionId()) {
        case GET_CLUSTER_CONTROLLER_INFO:
            try {
                handle.send(mid, ccs.getClusterControllerInfo(), null);
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending response to GET_CLUSTER_CONTROLLER_INFO request", e);
            }
            break;
        case GET_JOB_STATUS:
            HyracksClientInterfaceFunctions.GetJobStatusFunction gjsf = (HyracksClientInterfaceFunctions.GetJobStatusFunction) fn;
            ccs.getWorkQueue().schedule(new GetJobStatusWork(ccs.getJobManager(), gjsf.getJobId(), new IPCResponder<>(handle, mid)));
            break;
        case GET_JOB_INFO:
            HyracksClientInterfaceFunctions.GetJobInfoFunction gjif = (HyracksClientInterfaceFunctions.GetJobInfoFunction) fn;
            ccs.getWorkQueue().schedule(new GetJobInfoWork(ccs.getJobManager(), gjif.getJobId(), new IPCResponder<JobInfo>(handle, mid)));
            break;
        case DISTRIBUTE_JOB:
            HyracksClientInterfaceFunctions.DistributeJobFunction djf = (HyracksClientInterfaceFunctions.DistributeJobFunction) fn;
            ccs.getWorkQueue().schedule(new DistributeJobWork(ccs, djf.getACGGFBytes(), jobIdFactory.create(), new IPCResponder<JobId>(handle, mid)));
            break;
        case DESTROY_JOB:
            HyracksClientInterfaceFunctions.DestroyJobFunction dsjf = (HyracksClientInterfaceFunctions.DestroyJobFunction) fn;
            ccs.getWorkQueue().schedule(new DestroyJobWork(ccs, dsjf.getJobId(), new IPCResponder<JobId>(handle, mid)));
            break;
        case CANCEL_JOB:
            HyracksClientInterfaceFunctions.CancelJobFunction cjf = (HyracksClientInterfaceFunctions.CancelJobFunction) fn;
            ccs.getWorkQueue().schedule(new CancelJobWork(ccs.getJobManager(), cjf.getJobId(), new IPCResponder<Void>(handle, mid)));
            break;
        case START_JOB:
            HyracksClientInterfaceFunctions.StartJobFunction sjf = (HyracksClientInterfaceFunctions.StartJobFunction) fn;
            JobId jobId = sjf.getJobId();
            byte[] acggfBytes = null;
            boolean predistributed = false;
            if (jobId == null) {
                //The job is new
                jobId = jobIdFactory.create();
                acggfBytes = sjf.getACGGFBytes();
            } else {
                //The job has been predistributed. We don't need to send an ActivityClusterGraph
                predistributed = true;
            }
            ccs.getWorkQueue().schedule(new JobStartWork(ccs, sjf.getDeploymentId(), acggfBytes, sjf.getJobFlags(), jobId, new IPCResponder<JobId>(handle, mid), predistributed));
            break;
        case GET_DATASET_DIRECTORY_SERIVICE_INFO:
            ccs.getWorkQueue().schedule(new GetDatasetDirectoryServiceInfoWork(ccs, new IPCResponder<NetworkAddress>(handle, mid)));
            break;
        case GET_DATASET_RESULT_STATUS:
            HyracksClientInterfaceFunctions.GetDatasetResultStatusFunction gdrsf = (HyracksClientInterfaceFunctions.GetDatasetResultStatusFunction) fn;
            ccs.getWorkQueue().schedule(new GetResultStatusWork(ccs, gdrsf.getJobId(), gdrsf.getResultSetId(), new IPCResponder<Status>(handle, mid)));
            break;
        case GET_DATASET_RESULT_LOCATIONS:
            HyracksClientInterfaceFunctions.GetDatasetResultLocationsFunction gdrlf = (HyracksClientInterfaceFunctions.GetDatasetResultLocationsFunction) fn;
            ccs.getWorkQueue().schedule(new GetResultPartitionLocationsWork(ccs, gdrlf.getJobId(), gdrlf.getResultSetId(), gdrlf.getKnownRecords(), new IPCResponder<>(handle, mid)));
            break;
        case WAIT_FOR_COMPLETION:
            HyracksClientInterfaceFunctions.WaitForCompletionFunction wfcf = (HyracksClientInterfaceFunctions.WaitForCompletionFunction) fn;
            ccs.getWorkQueue().schedule(new WaitForJobCompletionWork(ccs, wfcf.getJobId(), new IPCResponder<>(handle, mid)));
            break;
        case GET_NODE_CONTROLLERS_INFO:
            ccs.getWorkQueue().schedule(new GetNodeControllersInfoWork(ccs.getNodeManager(), new IPCResponder<>(handle, mid)));
            break;
        case GET_CLUSTER_TOPOLOGY:
            try {
                handle.send(mid, ccs.getCCContext().getClusterTopology(), null);
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending response to GET_CLUSTER_TOPOLOGY request", e);
            }
            break;
        case CLI_DEPLOY_BINARY:
            HyracksClientInterfaceFunctions.CliDeployBinaryFunction dbf = (HyracksClientInterfaceFunctions.CliDeployBinaryFunction) fn;
            ccs.getWorkQueue().schedule(new CliDeployBinaryWork(ccs, dbf.getBinaryURLs(), dbf.getDeploymentId(), new IPCResponder<>(handle, mid)));
            break;
        case CLI_UNDEPLOY_BINARY:
            HyracksClientInterfaceFunctions.CliUnDeployBinaryFunction udbf = (HyracksClientInterfaceFunctions.CliUnDeployBinaryFunction) fn;
            ccs.getWorkQueue().schedule(new CliUnDeployBinaryWork(ccs, udbf.getDeploymentId(), new IPCResponder<>(handle, mid)));
            break;
        case CLUSTER_SHUTDOWN:
            HyracksClientInterfaceFunctions.ClusterShutdownFunction csf = (HyracksClientInterfaceFunctions.ClusterShutdownFunction) fn;
            ccs.getWorkQueue().schedule(new ClusterShutdownWork(ccs, csf.isTerminateNCService(), new IPCResponder<>(handle, mid)));
            break;
        case GET_NODE_DETAILS_JSON:
            HyracksClientInterfaceFunctions.GetNodeDetailsJSONFunction gndjf = (HyracksClientInterfaceFunctions.GetNodeDetailsJSONFunction) fn;
            ccs.getWorkQueue().schedule(new GetNodeDetailsJSONWork(ccs.getNodeManager(), ccs.getCCConfig(), gndjf.getNodeId(), gndjf.isIncludeStats(), gndjf.isIncludeConfig(), new IPCResponder<>(handle, mid)));
            break;
        case THREAD_DUMP:
            HyracksClientInterfaceFunctions.ThreadDumpFunction tdf = (HyracksClientInterfaceFunctions.ThreadDumpFunction) fn;
            ccs.getWorkQueue().schedule(new GetThreadDumpWork(ccs, tdf.getNode(), new IPCResponder<String>(handle, mid)));
            break;
        default:
            try {
                handle.send(mid, null, new IllegalArgumentException("Unknown function " + fn.getFunctionId()));
            } catch (IPCException e) {
                LOGGER.log(Level.WARNING, "Error sending Unknown function response", e);
            }
    }
}
Also used : HyracksClientInterfaceFunctions(org.apache.hyracks.api.client.HyracksClientInterfaceFunctions) IPCResponder(org.apache.hyracks.control.common.work.IPCResponder) DistributeJobWork(org.apache.hyracks.control.cc.work.DistributeJobWork) WaitForJobCompletionWork(org.apache.hyracks.control.cc.work.WaitForJobCompletionWork) IPCException(org.apache.hyracks.ipc.exceptions.IPCException) GetResultStatusWork(org.apache.hyracks.control.cc.work.GetResultStatusWork) ClusterShutdownWork(org.apache.hyracks.control.cc.work.ClusterShutdownWork) GetNodeDetailsJSONWork(org.apache.hyracks.control.cc.work.GetNodeDetailsJSONWork) GetJobStatusWork(org.apache.hyracks.control.cc.work.GetJobStatusWork) GetNodeControllersInfoWork(org.apache.hyracks.control.cc.work.GetNodeControllersInfoWork) CancelJobWork(org.apache.hyracks.control.cc.work.CancelJobWork) GetResultPartitionLocationsWork(org.apache.hyracks.control.cc.work.GetResultPartitionLocationsWork) GetDatasetDirectoryServiceInfoWork(org.apache.hyracks.control.cc.work.GetDatasetDirectoryServiceInfoWork) JobId(org.apache.hyracks.api.job.JobId) JobStartWork(org.apache.hyracks.control.cc.work.JobStartWork) CliUnDeployBinaryWork(org.apache.hyracks.control.cc.work.CliUnDeployBinaryWork) DestroyJobWork(org.apache.hyracks.control.cc.work.DestroyJobWork) GetJobInfoWork(org.apache.hyracks.control.cc.work.GetJobInfoWork) GetThreadDumpWork(org.apache.hyracks.control.cc.work.GetThreadDumpWork) CliDeployBinaryWork(org.apache.hyracks.control.cc.work.CliDeployBinaryWork)
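
The direct-reply branches above (GET_CLUSTER_CONTROLLER_INFO, GET_CLUSTER_TOPOLOGY, and the default case) all repeat the same try { handle.send(...) } catch (IPCException e) pattern. A small utility could centralize that handling. The sketch below is not part of the asterixdb sources; the class name IpcReplyHelper, the method safeSend, the requestName parameter, and the java.util.logging Logger are assumptions introduced here for illustration.

import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.hyracks.ipc.api.IIPCHandle;
import org.apache.hyracks.ipc.exceptions.IPCException;

// Hypothetical helper: deliver a response on an IPC handle and log, rather than
// propagate, the IPCException if the reply cannot be sent.
final class IpcReplyHelper {

    private static final Logger LOGGER = Logger.getLogger(IpcReplyHelper.class.getName());

    private IpcReplyHelper() {
    }

    // Same call shape as in ClientInterfaceIPCI: (message id, payload, exception).
    static void safeSend(IIPCHandle handle, long mid, Object payload, Exception exception, String requestName) {
        try {
            handle.send(mid, payload, exception);
        } catch (IPCException e) {
            LOGGER.log(Level.WARNING, "Error sending response to " + requestName + " request", e);
        }
    }
}

With such a helper, the GET_CLUSTER_CONTROLLER_INFO branch would reduce to IpcReplyHelper.safeSend(handle, mid, ccs.getClusterControllerInfo(), null, "GET_CLUSTER_CONTROLLER_INFO").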

Example 2 with IPCException

Use of org.apache.hyracks.ipc.exceptions.IPCException in project asterixdb by apache.

From the class IPCTest, method createServerIPCSystem:

private IPCSystem createServerIPCSystem() throws IOException {
    final Executor executor = Executors.newCachedThreadPool();
    IIPCI ipci = new IIPCI() {

        @Override
        public void deliverIncomingMessage(final IIPCHandle handle, final long mid, long rmid, final Object payload, Exception exception) {
            executor.execute(new Runnable() {

                @Override
                public void run() {
                    Object result = null;
                    Exception exception = null;
                    try {
                        Integer i = (Integer) payload;
                        result = i.intValue() * 2;
                    } catch (Exception e) {
                        exception = e;
                    }
                    try {
                        handle.send(mid, result, exception);
                    } catch (IPCException e) {
                        e.printStackTrace();
                    }
                }
            });
        }
    };
    return new IPCSystem(new InetSocketAddress("127.0.0.1", 0), ipci, new JavaSerializationBasedPayloadSerializerDeserializer());
}
Also used : IIPCHandle(org.apache.hyracks.ipc.api.IIPCHandle) IIPCI(org.apache.hyracks.ipc.api.IIPCI) Executor(java.util.concurrent.Executor) JavaSerializationBasedPayloadSerializerDeserializer(org.apache.hyracks.ipc.impl.JavaSerializationBasedPayloadSerializerDeserializer) InetSocketAddress(java.net.InetSocketAddress) IPCException(org.apache.hyracks.ipc.exceptions.IPCException) IOException(java.io.IOException) IPCSystem(org.apache.hyracks.ipc.impl.IPCSystem)
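
Because Runnable is a functional interface, the worker submitted to the executor in the example above can also be written as a lambda on Java 8 and later. The sketch below keeps the anonymous IIPCI and only compacts the inner Runnable; it is not the test's actual code, it assumes the same executor and imports as createServerIPCSystem, and the local variable failure replaces the inner exception (a lambda shares the enclosing scope, so the method parameter's name cannot be reused).

IIPCI ipci = new IIPCI() {

    @Override
    public void deliverIncomingMessage(final IIPCHandle handle, final long mid, long rmid, final Object payload,
            Exception exception) {
        // Hand the work off to the executor, then reply on the same handle with the request's mid.
        executor.execute(() -> {
            Object result = null;
            Exception failure = null;
            try {
                // Same toy computation as the test: double the incoming Integer.
                result = ((Integer) payload).intValue() * 2;
            } catch (Exception e) {
                failure = e;
            }
            try {
                handle.send(mid, result, failure);
            } catch (IPCException e) {
                e.printStackTrace();
            }
        });
    }
};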

Example 3 with IPCException

Use of org.apache.hyracks.ipc.exceptions.IPCException in project asterixdb by apache.

From the class ClusterShutdownWork, method doRun:

@Override
public void doRun() {
    try {
        if (ccs.getShutdownRun() != null) {
            throw new IPCException("Shutdown already in progress");
        }
        INodeManager nodeManager = ccs.getNodeManager();
        Collection<String> nodeIds = nodeManager.getAllNodeIds();
        /**
         * set up our listener for the node ACKs
         */
        final ShutdownRun shutdownStatus = new ShutdownRun(nodeIds);
        // set up the CC to listen for it
        ccs.setShutdownRun(shutdownStatus);
        /**
         * Shutdown all the nodes...
         */
        nodeManager.apply(this::shutdownNode);
        ccs.getExecutor().execute(new Runnable() {

            @Override
            public void run() {
                try {
                    /*
                     * wait for all our acks
                     */
                    LOGGER.info("Waiting for NCs to shutdown...");
                    boolean cleanShutdown = shutdownStatus.waitForCompletion();
                    if (!cleanShutdown) {
                        /*
                         * best effort - just exit, user will have to kill misbehaving NCs
                         */
                        LOGGER.severe("Clean shutdown of NCs timed out- giving up; unresponsive nodes: " + shutdownStatus.getRemainingNodes());
                    }
                    callback.setValue(cleanShutdown);
                    ccs.stop(terminateNCService);
                    LOGGER.info("JVM Exiting.. Bye!");
                    Runtime rt = Runtime.getRuntime();
                    rt.exit(cleanShutdown ? 0 : 1);
                } catch (Exception e) {
                    callback.setException(e);
                }
            }
        });
    } catch (Exception e) {
        callback.setException(e);
    }
}
Also used : INodeManager(org.apache.hyracks.control.cc.cluster.INodeManager) ShutdownRun(org.apache.hyracks.control.common.shutdown.ShutdownRun) IPCException(org.apache.hyracks.ipc.exceptions.IPCException)
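
ShutdownRun itself is not shown in this example; as used above, it is constructed from the collection of node ids, waitForCompletion() blocks until every node has acknowledged shutdown or a timeout elapses (returning false on timeout), and getRemainingNodes() reports the nodes that never answered. The following is a minimal, hypothetical tracker of that shape, assuming a fixed 10-second timeout; it is not the project's ShutdownRun (that class lives in org.apache.hyracks.control.common.shutdown).

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-in for ShutdownRun: tracks which nodes still owe a shutdown ACK
// and lets a waiter block until all ACKs arrive or an assumed timeout passes.
final class ShutdownAckTracker {

    private static final long TIMEOUT_MS = 10_000; // assumed value, not taken from the source

    private final Set<String> remainingNodes;

    ShutdownAckTracker(Collection<String> nodeIds) {
        this.remainingNodes = new HashSet<>(nodeIds);
    }

    // Called when a node reports that it has shut down.
    synchronized void notifyShutdown(String nodeId) {
        remainingNodes.remove(nodeId);
        if (remainingNodes.isEmpty()) {
            notifyAll();
        }
    }

    // Returns true if every node acknowledged before the timeout, false otherwise.
    synchronized boolean waitForCompletion() throws InterruptedException {
        long deadline = System.currentTimeMillis() + TIMEOUT_MS;
        while (!remainingNodes.isEmpty()) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0) {
                return false;
            }
            wait(remaining);
        }
        return true;
    }

    // Nodes that have not yet acknowledged; useful for the "unresponsive nodes" log above.
    synchronized Set<String> getRemainingNodes() {
        return new HashSet<>(remainingNodes);
    }
}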

Aggregations

IPCException (org.apache.hyracks.ipc.exceptions.IPCException): 3
IOException (java.io.IOException): 1
InetSocketAddress (java.net.InetSocketAddress): 1
Executor (java.util.concurrent.Executor): 1
HyracksClientInterfaceFunctions (org.apache.hyracks.api.client.HyracksClientInterfaceFunctions): 1
JobId (org.apache.hyracks.api.job.JobId): 1
INodeManager (org.apache.hyracks.control.cc.cluster.INodeManager): 1
CancelJobWork (org.apache.hyracks.control.cc.work.CancelJobWork): 1
CliDeployBinaryWork (org.apache.hyracks.control.cc.work.CliDeployBinaryWork): 1
CliUnDeployBinaryWork (org.apache.hyracks.control.cc.work.CliUnDeployBinaryWork): 1
ClusterShutdownWork (org.apache.hyracks.control.cc.work.ClusterShutdownWork): 1
DestroyJobWork (org.apache.hyracks.control.cc.work.DestroyJobWork): 1
DistributeJobWork (org.apache.hyracks.control.cc.work.DistributeJobWork): 1
GetDatasetDirectoryServiceInfoWork (org.apache.hyracks.control.cc.work.GetDatasetDirectoryServiceInfoWork): 1
GetJobInfoWork (org.apache.hyracks.control.cc.work.GetJobInfoWork): 1
GetJobStatusWork (org.apache.hyracks.control.cc.work.GetJobStatusWork): 1
GetNodeControllersInfoWork (org.apache.hyracks.control.cc.work.GetNodeControllersInfoWork): 1
GetNodeDetailsJSONWork (org.apache.hyracks.control.cc.work.GetNodeDetailsJSONWork): 1
GetResultPartitionLocationsWork (org.apache.hyracks.control.cc.work.GetResultPartitionLocationsWork): 1
GetResultStatusWork (org.apache.hyracks.control.cc.work.GetResultStatusWork): 1