Search in sources :

Example 11 with ZFrame

use of org.zeromq.ZFrame in project jeromq by zeromq.

Source: class ticlient, method serviceCall.

//  Calls a Majordomo service via the supplied client session and strips the
//  leading status frame from the reply.
//  Returns the remaining reply frames on status "200", or null on timeout
//  or on a "400"/"500" (or unknown) status. The request message is consumed
//  by session.send() in all cases.
static ZMsg serviceCall(mdcliapi session, String service, ZMsg request) {
    ZMsg reply = session.send(service, request);
    if (reply != null) {
        //  First frame of every reply is the status code
        ZFrame status = reply.pop();
        if (status.streq("200")) {
            status.destroy();
            return reply;
        } else if (status.streq("400")) {
            System.out.println("E: client fatal error, aborting");
        } else if (status.streq("500")) {
            System.out.println("E: server fatal error, aborting");
        }
        //  BUG FIX: the status frame was previously leaked on any
        //  non-"200" status; destroy it on every failure path.
        status.destroy();
        reply.destroy();
    }
    //  Didn't succeed; don't care why not
    return null;
}
Also used : ZFrame(org.zeromq.ZFrame) ZMsg(org.zeromq.ZMsg)

Example 12 with ZFrame

use of org.zeromq.ZFrame in project jeromq by zeromq.

Source: class ticlient, method main.

//  Titanic client: submits an 'echo' request through the Titanic broker,
//  polls for the stored reply, then closes the request.
//  Pass "-v" as the first argument for verbose session output.
public static void main(String[] args) throws Exception {
    boolean verbose = (args.length > 0 && args[0].equals("-v"));
    mdcliapi session = new mdcliapi("tcp://localhost:5555", verbose);
    //  1. Send 'echo' request to Titanic
    ZMsg request = new ZMsg();
    request.add("echo");
    request.add("Hello world");
    ZMsg reply = serviceCall(session, "titanic.request", request);
    ZFrame uuid = null;
    if (reply != null) {
        //  Titanic returns a UUID identifying the stored request
        uuid = reply.pop();
        reply.destroy();
        uuid.print("I: request UUID ");
    } else {
        //  BUG FIX: without a UUID the loop below would NPE on
        //  uuid.duplicate(); bail out cleanly instead.
        System.out.println("E: titanic.request failed, aborting");
        session.destroy();
        return;
    }
    //  2. Wait until we get a reply
    while (!Thread.currentThread().isInterrupted()) {
        Thread.sleep(100);
        request = new ZMsg();
        request.add(uuid.duplicate());
        reply = serviceCall(session, "titanic.reply", request);
        if (reply != null) {
            String replyString = reply.getLast().toString();
            System.out.printf("Reply: %s\n", replyString);
            reply.destroy();
            //  3. Close request
            request = new ZMsg();
            request.add(uuid.duplicate());
            reply = serviceCall(session, "titanic.close", request);
            //  BUG FIX: serviceCall returns null on timeout/error;
            //  guard before destroying.
            if (reply != null)
                reply.destroy();
            break;
        } else {
            System.out.println("I: no reply yet, trying again...");
            //  Try again in 5 seconds
            Thread.sleep(5000);
        }
    }
    uuid.destroy();
    session.destroy();
}
Also used : ZFrame(org.zeromq.ZFrame) ZMsg(org.zeromq.ZMsg)

Example 13 with ZFrame

use of org.zeromq.ZFrame in project jeromq by zeromq.

Source: class titanic, method serviceSuccess.

//  .split try to call a service
//  Here, we first check if the requested MDP service is defined or not,
//  using a MMI lookup to the Majordomo broker. If the service exists,
//  we send a request and wait for a reply using the conventional MDP
//  client API. This is not meant to be fast, just very simple:
//  Attempts to dispatch the stored request identified by uuid to its MDP
//  service and persist the reply.
//  Returns true when the request needs no further work (reply saved, client
//  already closed the request, or the request file is unreadable); returns
//  false when the service is unavailable or did not answer, so the caller
//  should retry later.
static boolean serviceSuccess(String uuid) {
    //  Load request message, service will be first frame
    String filename = requestFilename(uuid);
    //  If the client already closed request, treat as successful
    if (!new File(filename).exists())
        return true;
    DataInputStream file = null;
    ZMsg request;
    try {
        file = new DataInputStream(new FileInputStream(filename));
        request = ZMsg.load(file);
    } catch (IOException e) {
        //  Unreadable request file: report success so it isn't retried forever
        e.printStackTrace();
        return true;
    } finally {
        try {
            if (file != null)
                file.close();
        } catch (IOException e) {
            //  Intentionally ignored: nothing useful to do on close failure
        }
    }
    ZFrame service = request.pop();
    String serviceName = service.toString();
    //  Create MDP client session with short timeout
    mdcliapi client = new mdcliapi("tcp://localhost:5555", false);
    //  1 sec
    client.setTimeout(1000);
    //  only 1 retry
    client.setRetries(1);
    //  Use MMI protocol to check if service is available
    ZMsg mmiRequest = new ZMsg();
    mmiRequest.add(service);
    ZMsg mmiReply = client.send("mmi.service", mmiRequest);
    //  BUG FIX: client.send returns null on timeout; the unconditional
    //  mmiReply.destroy() below used to throw NPE in that case.
    boolean serviceOK = (mmiReply != null && mmiReply.getFirst().toString().equals("200"));
    if (mmiReply != null)
        mmiReply.destroy();
    boolean result = false;
    if (serviceOK) {
        ZMsg reply = client.send(serviceName, request);
        if (reply != null) {
            filename = replyFilename(uuid);
            DataOutputStream ofile = null;
            try {
                ofile = new DataOutputStream(new FileOutputStream(filename));
                ZMsg.save(reply, ofile);
            } catch (IOException e) {
                e.printStackTrace();
                return true;
            } finally {
                try {
                    //  BUG FIX: this finally previously closed 'file' (the
                    //  input stream, already closed above) and leaked 'ofile'.
                    if (ofile != null)
                        ofile.close();
                } catch (IOException e) {
                    //  Intentionally ignored
                }
            }
            result = true;
            //  BUG FIX: destroy the reply only when non-null; it was
            //  previously destroyed outside this guard and could NPE.
            reply.destroy();
        }
    } else
        request.destroy();
    client.destroy();
    return result;
}
Also used : ZFrame(org.zeromq.ZFrame) DataOutputStream(java.io.DataOutputStream) FileOutputStream(java.io.FileOutputStream) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) ZMsg(org.zeromq.ZMsg) FileInputStream(java.io.FileInputStream)

Example 14 with ZFrame

use of org.zeromq.ZFrame in project jeromq by zeromq.

Source: class peering2, method main.

//  The main task begins by setting-up its frontend and backend sockets
//  and then starting its client and worker tasks:
//  Broker main task: binds the local and cloud ROUTER sockets, starts the
//  client/worker threads, then runs the LRU request-reply routing loop.
//  argv[0] is this broker's name; remaining arguments name peer brokers.
public static void main(String[] argv) {
    //
    if (argv.length < 1) {
        System.out.println("syntax: peering2 me {you}");
        System.exit(-1);
    }
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());
    ZContext ctx = new ZContext();
    //  Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes(ZMQ.CHARSET));
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));
    //  Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes(ZMQ.CHARSET));
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        //  BUG FIX: message typo "forintend" corrected to "frontend"
        System.out.printf("I: connecting to cloud frontend at '%s'\n", peer);
        cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }
    //  Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));
    //  Get user to tell us when we can start
    System.out.println("Press Enter when all brokers are started: ");
    try {
        System.in.read();
    } catch (IOException e) {
        e.printStackTrace();
    }
    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) new worker_task().start();
    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) new client_task().start();
    //  Here we handle the request-reply flow. We're using the LRU approach
    //  to poll workers at all times, and clients only when there are one or
    //  more workers available.
    //  Least recently used queue of available workers
    int capacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();
    Poller backends = ctx.createPoller(2);
    backends.register(localbe, Poller.POLLIN);
    backends.register(cloudbe, Poller.POLLIN);
    Poller frontends = ctx.createPoller(2);
    frontends.register(localfe, Poller.POLLIN);
    frontends.register(cloudfe, Poller.POLLIN);
    while (true) {
        //  First, route any waiting replies from workers
        //  If we have no workers anyhow, wait indefinitely
        int rc = backends.poll(capacity > 0 ? 1000 : -1);
        if (rc == -1)
            //  Interrupted
            break;
        //  Handle reply from local worker
        ZMsg msg = null;
        if (backends.pollin(0)) {
            msg = ZMsg.recvMsg(localbe);
            if (msg == null)
                //  Interrupted
                break;
            //  The worker's own identity frame goes back into the LRU queue
            ZFrame address = msg.unwrap();
            workers.add(address);
            capacity++;
            //  If it's READY, don't route the message any further
            ZFrame frame = msg.getFirst();
            if (new String(frame.getData(), ZMQ.CHARSET).equals(WORKER_READY)) {
                msg.destroy();
                msg = null;
            }
        } else //  Or handle reply from peer broker
        if (backends.pollin(1)) {
            msg = ZMsg.recvMsg(cloudbe);
            if (msg == null)
                //  Interrupted
                break;
            //  We don't use peer broker address for anything
            ZFrame address = msg.unwrap();
            address.destroy();
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 1; msg != null && argn < argv.length; argn++) {
            byte[] data = msg.getFirst().getData();
            if (argv[argn].equals(new String(data, ZMQ.CHARSET))) {
                msg.send(cloudfe);
                msg = null;
            }
        }
        //  Route reply to client if we still need to
        if (msg != null)
            msg.send(localfe);
        //  Now route as many client requests as we have worker capacity for
        while (capacity > 0) {
            rc = frontends.poll(0);
            assert (rc >= 0);
            int reroutable = 0;
            //  We'll do peer brokers first, to prevent starvation
            if (frontends.pollin(1)) {
                msg = ZMsg.recvMsg(cloudfe);
                //  Requests from peers must be served locally, never re-routed
                reroutable = 0;
            } else if (frontends.pollin(0)) {
                msg = ZMsg.recvMsg(localfe);
                reroutable = 1;
            } else
                //  No work, go back to backends
                break;
            //  Randomly (1-in-5) bounce an eligible local request to a peer,
            //  to simulate cloud load distribution
            if (reroutable != 0 && argv.length > 1 && rand.nextInt(5) == 0) {
                //  Route to random broker peer
                int random_peer = rand.nextInt(argv.length - 1) + 1;
                msg.push(argv[random_peer]);
                msg.send(cloudbe);
            } else {
                //  Hand to the least recently used local worker
                ZFrame frame = workers.remove(0);
                msg.wrap(frame);
                msg.send(localbe);
                capacity--;
            }
        }
    }
    //  When we're done, clean up properly
    while (workers.size() > 0) {
        ZFrame frame = workers.remove(0);
        frame.destroy();
    }
    ctx.destroy();
}
Also used : ArrayList(java.util.ArrayList) IOException(java.io.IOException) ZContext(org.zeromq.ZContext) ZMsg(org.zeromq.ZMsg) ZFrame(org.zeromq.ZFrame) Random(java.util.Random) Socket(org.zeromq.ZMQ.Socket) Poller(org.zeromq.ZMQ.Poller)

Example 15 with ZFrame

use of org.zeromq.ZFrame in project jeromq by zeromq.

Source: class peering3, method main.

//  The main task begins by setting-up all its sockets. The local frontend
//  talks to clients, and our local backend talks to workers. The cloud
//  frontend talks to peer brokers as if they were clients, and the cloud
//  backend talks to peer brokers as if they were workers. The state
//  backend publishes regular state messages, and the state frontend
//  subscribes to all state backends to collect these messages. Finally,
//  we use a PULL monitor socket to collect printable messages from tasks:
//  Broker main task: sets up local, cloud, state, and monitor sockets,
//  starts client/worker threads, then runs the routing loop, broadcasting
//  local worker capacity to peers whenever it changes.
//  argv[0] is this broker's name; remaining arguments name peer brokers.
public static void main(String[] argv) {
    //
    if (argv.length < 1) {
        System.out.println("syntax: peering3 me {you}");
        System.exit(-1);
    }
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());
    ZContext ctx = new ZContext();
    //  Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));
    //  Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes(ZMQ.CHARSET));
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));
    //  Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes(ZMQ.CHARSET));
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        //  BUG FIX: message typo "forintend" corrected to "frontend"
        System.out.printf("I: connecting to cloud frontend at '%s'\n", peer);
        cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }
    //  Bind state backend to endpoint
    Socket statebe = ctx.createSocket(ZMQ.PUB);
    statebe.bind(String.format("ipc://%s-state.ipc", self));
    //  Connect statefe to all peers
    Socket statefe = ctx.createSocket(ZMQ.SUB);
    statefe.subscribe(ZMQ.SUBSCRIPTION_ALL);
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        System.out.printf("I: connecting to state backend at '%s'\n", peer);
        statefe.connect(String.format("ipc://%s-state.ipc", peer));
    }
    //  Prepare monitor socket
    Socket monitor = ctx.createSocket(ZMQ.PULL);
    monitor.bind(String.format("ipc://%s-monitor.ipc", self));
    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) new worker_task().start();
    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) new client_task().start();
    //  Queue of available workers
    int localCapacity = 0;
    int cloudCapacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();
    //  The main loop has two parts. First we poll workers and our two service
    //  sockets (statefe and monitor), in any case. If we have no ready workers,
    //  there's no point in looking at incoming requests. These can remain on
    //  their internal 0MQ queues:
    Poller primary = ctx.createPoller(4);
    primary.register(localbe, Poller.POLLIN);
    primary.register(cloudbe, Poller.POLLIN);
    primary.register(statefe, Poller.POLLIN);
    primary.register(monitor, Poller.POLLIN);
    Poller secondary = ctx.createPoller(2);
    secondary.register(localfe, Poller.POLLIN);
    secondary.register(cloudfe, Poller.POLLIN);
    while (true) {
        //  First, route any waiting replies from workers
        //  If we have no workers anyhow, wait indefinitely
        int rc = primary.poll(localCapacity > 0 ? 1000 : -1);
        if (rc == -1)
            //  Interrupted
            break;
        //  Track if capacity changes during this iteration
        int previous = localCapacity;
        //  Handle reply from local worker
        ZMsg msg = null;
        if (primary.pollin(0)) {
            msg = ZMsg.recvMsg(localbe);
            if (msg == null)
                //  Interrupted
                break;
            //  The worker's own identity frame goes back into the LRU queue
            ZFrame address = msg.unwrap();
            workers.add(address);
            localCapacity++;
            //  If it's READY, don't route the message any further
            ZFrame frame = msg.getFirst();
            if (new String(frame.getData(), ZMQ.CHARSET).equals(WORKER_READY)) {
                msg.destroy();
                msg = null;
            }
        } else //  Or handle reply from peer broker
        if (primary.pollin(1)) {
            msg = ZMsg.recvMsg(cloudbe);
            if (msg == null)
                //  Interrupted
                break;
            //  We don't use peer broker address for anything
            ZFrame address = msg.unwrap();
            address.destroy();
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 1; msg != null && argn < argv.length; argn++) {
            byte[] data = msg.getFirst().getData();
            if (argv[argn].equals(new String(data, ZMQ.CHARSET))) {
                msg.send(cloudfe);
                msg = null;
            }
        }
        //  Route reply to client if we still need to
        if (msg != null)
            msg.send(localfe);
        //  Handle capacity updates from peers (first frame is the peer name,
        //  which we don't need beyond consuming it)
        if (primary.pollin(2)) {
            String peer = statefe.recvStr();
            String status = statefe.recvStr();
            cloudCapacity = Integer.parseInt(status);
        }
        //  Print any printable message arriving on the monitor socket
        if (primary.pollin(3)) {
            String status = monitor.recvStr();
            System.out.println(status);
        }
        //  Route requests while we have local or cloud capacity
        while (localCapacity + cloudCapacity > 0) {
            rc = secondary.poll(0);
            assert (rc >= 0);
            if (secondary.pollin(0)) {
                msg = ZMsg.recvMsg(localfe);
            } else if (localCapacity > 0 && secondary.pollin(1)) {
                //  Only accept cloud requests when we can serve them locally
                msg = ZMsg.recvMsg(cloudfe);
            } else
                //  No work, go back to backends
                break;
            if (localCapacity > 0) {
                //  Hand to the least recently used local worker
                ZFrame frame = workers.remove(0);
                msg.wrap(frame);
                msg.send(localbe);
                localCapacity--;
            } else {
                //  Route to random broker peer
                int random_peer = rand.nextInt(argv.length - 1) + 1;
                msg.push(argv[random_peer]);
                msg.send(cloudbe);
            }
        }
        if (localCapacity != previous) {
            //  We stick our own address onto the envelope
            statebe.sendMore(self);
            //  Broadcast new capacity
            statebe.send(String.format("%d", localCapacity), 0);
        }
    }
    //  When we're done, clean up properly
    while (workers.size() > 0) {
        ZFrame frame = workers.remove(0);
        frame.destroy();
    }
    ctx.destroy();
}
Also used : ArrayList(java.util.ArrayList) ZContext(org.zeromq.ZContext) ZMsg(org.zeromq.ZMsg) ZFrame(org.zeromq.ZFrame) Random(java.util.Random) Socket(org.zeromq.ZMQ.Socket) Poller(org.zeromq.ZMQ.Poller)

Aggregations

ZFrame (org.zeromq.ZFrame)27 ZMsg (org.zeromq.ZMsg)20 ZContext (org.zeromq.ZContext)13 Socket (org.zeromq.ZMQ.Socket)13 Poller (org.zeromq.ZMQ.Poller)7 ArrayList (java.util.ArrayList)4 Random (java.util.Random)4 ZMQ (org.zeromq.ZMQ)4 PollItem (org.zeromq.ZMQ.PollItem)3 IOException (java.io.IOException)2 ZLoop (org.zeromq.ZLoop)2 DataInputStream (java.io.DataInputStream)1 DataOutputStream (java.io.DataOutputStream)1 File (java.io.File)1 FileInputStream (java.io.FileInputStream)1 FileOutputStream (java.io.FileOutputStream)1 RandomAccessFile (java.io.RandomAccessFile)1 HashMap (java.util.HashMap)1 LinkedList (java.util.LinkedList)1