Example 21 with ZMsg

Use of org.zeromq.ZMsg in project jeromq by zeromq.

The class peering2, method main.

//  The main task begins by setting up its frontend and backend sockets
//  and then starting its client and worker tasks:
public static void main(String[] argv) {
    //  First argument is this broker's name; other arguments are our peers' names
    if (argv.length < 1) {
        System.out.println("syntax: peering2 me {you}");
        System.exit(-1);
    }
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());
    ZContext ctx = new ZContext();
    //  Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes(ZMQ.CHARSET));
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));
    //  Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes(ZMQ.CHARSET));
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        System.out.printf("I: connecting to cloud forintend at '%s'\n", peer);
        cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }
    //  Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));
    //  Get user to tell us when we can start
    System.out.println("Press Enter when all brokers are started: ");
    try {
        System.in.read();
    } catch (IOException e) {
        e.printStackTrace();
    }
    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) new worker_task().start();
    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) new client_task().start();
    //  Here we handle the request-reply flow. We're using the LRU approach
    //  to poll workers at all times, and clients only when there are one or
    //  more workers available.
    //  Least recently used queue of available workers
    int capacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();
    Poller backends = ctx.createPoller(2);
    backends.register(localbe, Poller.POLLIN);
    backends.register(cloudbe, Poller.POLLIN);
    Poller frontends = ctx.createPoller(2);
    frontends.register(localfe, Poller.POLLIN);
    frontends.register(cloudfe, Poller.POLLIN);
    while (true) {
        //  First, route any waiting replies from workers
        //  If we have no workers anyhow, wait indefinitely
        int rc = backends.poll(capacity > 0 ? 1000 : -1);
        if (rc == -1)
            //  Interrupted
            break;
        //  Handle reply from local worker
        ZMsg msg = null;
        if (backends.pollin(0)) {
            msg = ZMsg.recvMsg(localbe);
            if (msg == null)
                //  Interrupted
                break;
            ZFrame address = msg.unwrap();
            workers.add(address);
            capacity++;
            //  If it's READY, don't route the message any further
            ZFrame frame = msg.getFirst();
            if (new String(frame.getData(), ZMQ.CHARSET).equals(WORKER_READY)) {
                msg.destroy();
                msg = null;
            }
        } else if (backends.pollin(1)) {
            //  Or handle reply from peer broker
            msg = ZMsg.recvMsg(cloudbe);
            if (msg == null)
                //  Interrupted
                break;
            //  We don't use peer broker address for anything
            ZFrame address = msg.unwrap();
            address.destroy();
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 1; msg != null && argn < argv.length; argn++) {
            byte[] data = msg.getFirst().getData();
            if (argv[argn].equals(new String(data, ZMQ.CHARSET))) {
                msg.send(cloudfe);
                msg = null;
            }
        }
        //  Route reply to client if we still need to
        if (msg != null)
            msg.send(localfe);
        while (capacity > 0) {
            rc = frontends.poll(0);
            assert (rc >= 0);
            int reroutable = 0;
            //  We'll do peer brokers first, to prevent starvation
            if (frontends.pollin(1)) {
                msg = ZMsg.recvMsg(cloudfe);
                reroutable = 0;
            } else if (frontends.pollin(0)) {
                msg = ZMsg.recvMsg(localfe);
                reroutable = 1;
            } else
                //  No work, go back to backends
                break;
            //  If reroutable, send to cloud 20% of the time; here we'd normally use cloud status information
            if (reroutable != 0 && argv.length > 1 && rand.nextInt(5) == 0) {
                //  Route to random broker peer
                int random_peer = rand.nextInt(argv.length - 1) + 1;
                msg.push(argv[random_peer]);
                msg.send(cloudbe);
            } else {
                ZFrame frame = workers.remove(0);
                msg.wrap(frame);
                msg.send(localbe);
                capacity--;
            }
        }
    }
    //  When we're done, clean up properly
    while (workers.size() > 0) {
        ZFrame frame = workers.remove(0);
        frame.destroy();
    }
    ctx.destroy();
}
Also used: ArrayList (java.util.ArrayList), IOException (java.io.IOException), ZContext (org.zeromq.ZContext), ZMsg (org.zeromq.ZMsg), ZFrame (org.zeromq.ZFrame), Random (java.util.Random), Socket (org.zeromq.ZMQ.Socket), Poller (org.zeromq.ZMQ.Poller)
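
The worker_task and client_task threads launched above are not shown in this excerpt. A minimal sketch of what they might look like inside the same class, wired to the localbe/localfe endpoints the broker binds; the bodies are assumptions in the spirit of the ZeroMQ Guide example, not the verbatim jeromq code.

//  Hypothetical sketch, assumed to live in the same class as main()
private static class worker_task extends Thread {
    @Override
    public void run() {
        ZContext ctx = new ZContext();
        Socket worker = ctx.createSocket(ZMQ.REQ);
        worker.connect(String.format("ipc://%s-localbe.ipc", self));
        //  Tell broker we're ready for work
        ZFrame frame = new ZFrame(WORKER_READY);
        frame.send(worker, 0);
        while (true) {
            ZMsg msg = ZMsg.recvMsg(worker);
            if (msg == null)
                break;  //  Interrupted
            //  Overwrite the request body with an OK reply
            msg.getLast().reset("OK");
            msg.send(worker);
        }
        ctx.destroy();
    }
}

private static class client_task extends Thread {
    @Override
    public void run() {
        ZContext ctx = new ZContext();
        Socket client = ctx.createSocket(ZMQ.REQ);
        client.connect(String.format("ipc://%s-localfe.ipc", self));
        while (true) {
            //  Send request, wait for reply
            client.send("HELLO", 0);
            String reply = client.recvStr();
            if (reply == null)
                break;  //  Interrupted
            System.out.printf("Client: %s\n", reply);
        }
        ctx.destroy();
    }
}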

Example 22 with ZMsg

Use of org.zeromq.ZMsg in project jeromq by zeromq.

The class peering3, method main.

//  The main task begins by setting up all its sockets. The local frontend
//  talks to clients, and our local backend talks to workers. The cloud
//  frontend talks to peer brokers as if they were clients, and the cloud
//  backend talks to peer brokers as if they were workers. The state
//  backend publishes regular state messages, and the state frontend
//  subscribes to all state backends to collect these messages. Finally,
//  we use a PULL monitor socket to collect printable messages from tasks:
public static void main(String[] argv) {
    //  First argument is this broker's name; other arguments are our peers' names
    if (argv.length < 1) {
        System.out.println("syntax: peering3 me {you}");
        System.exit(-1);
    }
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());
    ZContext ctx = new ZContext();
    //  Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));
    //  Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes(ZMQ.CHARSET));
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));
    //  Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes(ZMQ.CHARSET));
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        System.out.printf("I: connecting to cloud forintend at '%s'\n", peer);
        cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }
    //  Bind state backend to endpoint
    Socket statebe = ctx.createSocket(ZMQ.PUB);
    statebe.bind(String.format("ipc://%s-state.ipc", self));
    //  Connect statefe to all peers
    Socket statefe = ctx.createSocket(ZMQ.SUB);
    statefe.subscribe(ZMQ.SUBSCRIPTION_ALL);
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        System.out.printf("I: connecting to state backend at '%s'\n", peer);
        statefe.connect(String.format("ipc://%s-state.ipc", peer));
    }
    //  Prepare monitor socket
    Socket monitor = ctx.createSocket(ZMQ.PULL);
    monitor.bind(String.format("ipc://%s-monitor.ipc", self));
    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) new worker_task().start();
    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) new client_task().start();
    //  Queue of available workers
    int localCapacity = 0;
    int cloudCapacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();
    //  The main loop has two parts. First we poll workers and our two service
    //  sockets (statefe and monitor), in any case. If we have no ready workers,
    //  there's no point in looking at incoming requests. These can remain on
    //  their internal 0MQ queues:
    Poller primary = ctx.createPoller(4);
    primary.register(localbe, Poller.POLLIN);
    primary.register(cloudbe, Poller.POLLIN);
    primary.register(statefe, Poller.POLLIN);
    primary.register(monitor, Poller.POLLIN);
    Poller secondary = ctx.createPoller(2);
    secondary.register(localfe, Poller.POLLIN);
    secondary.register(cloudfe, Poller.POLLIN);
    while (true) {
        //  First, route any waiting replies from workers
        //  If we have no workers anyhow, wait indefinitely
        int rc = primary.poll(localCapacity > 0 ? 1000 : -1);
        if (rc == -1)
            //  Interrupted
            break;
        //  Track if capacity changes during this iteration
        int previous = localCapacity;
        //  Handle reply from local worker
        ZMsg msg = null;
        if (primary.pollin(0)) {
            msg = ZMsg.recvMsg(localbe);
            if (msg == null)
                //  Interrupted
                break;
            ZFrame address = msg.unwrap();
            workers.add(address);
            localCapacity++;
            //  If it's READY, don't route the message any further
            ZFrame frame = msg.getFirst();
            if (new String(frame.getData(), ZMQ.CHARSET).equals(WORKER_READY)) {
                msg.destroy();
                msg = null;
            }
        } else if (primary.pollin(1)) {
            //  Or handle reply from peer broker
            msg = ZMsg.recvMsg(cloudbe);
            if (msg == null)
                //  Interrupted
                break;
            //  We don't use peer broker address for anything
            ZFrame address = msg.unwrap();
            address.destroy();
        }
        //  Route reply to cloud if it's addressed to a broker
        for (argn = 1; msg != null && argn < argv.length; argn++) {
            byte[] data = msg.getFirst().getData();
            if (argv[argn].equals(new String(data, ZMQ.CHARSET))) {
                msg.send(cloudfe);
                msg = null;
            }
        }
        //  Route reply to client if we still need to
        if (msg != null)
            msg.send(localfe);
        if (primary.pollin(2)) {
            String peer = statefe.recvStr();
            String status = statefe.recvStr();
            cloudCapacity = Integer.parseInt(status);
        }
        if (primary.pollin(3)) {
            String status = monitor.recvStr();
            System.out.println(status);
        }
        while (localCapacity + cloudCapacity > 0) {
            rc = secondary.poll(0);
            assert (rc >= 0);
            if (secondary.pollin(0)) {
                msg = ZMsg.recvMsg(localfe);
            } else if (localCapacity > 0 && secondary.pollin(1)) {
                msg = ZMsg.recvMsg(cloudfe);
            } else
                //  No work, go back to backends
                break;
            if (localCapacity > 0) {
                ZFrame frame = workers.remove(0);
                msg.wrap(frame);
                msg.send(localbe);
                localCapacity--;
            } else {
                //  Route to random broker peer
                int random_peer = rand.nextInt(argv.length - 1) + 1;
                msg.push(argv[random_peer]);
                msg.send(cloudbe);
            }
        }
        if (localCapacity != previous) {
            //  We stick our own address onto the envelope
            statebe.sendMore(self);
            //  Broadcast new capacity
            statebe.send(String.format("%d", localCapacity), 0);
        }
    }
    //  When we're done, clean up properly
    while (workers.size() > 0) {
        ZFrame frame = workers.remove(0);
        frame.destroy();
    }
    ctx.destroy();
}
Also used: ArrayList (java.util.ArrayList), ZContext (org.zeromq.ZContext), ZMsg (org.zeromq.ZMsg), ZFrame (org.zeromq.ZFrame), Random (java.util.Random), Socket (org.zeromq.ZMQ.Socket), Poller (org.zeromq.ZMQ.Poller)
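
peering3's worker and client tasks are omitted here as well; the difference from peering2 is that tasks report through the PULL monitor socket bound above instead of printing directly. A rough sketch of such a client task, with the same caveat that names and behavior are assumptions rather than the verbatim jeromq code:

//  Hypothetical sketch, assumed to live in the same class as main()
private static class client_task extends Thread {
    @Override
    public void run() {
        ZContext ctx = new ZContext();
        Socket client = ctx.createSocket(ZMQ.REQ);
        client.connect(String.format("ipc://%s-localfe.ipc", self));
        Socket monitor = ctx.createSocket(ZMQ.PUSH);
        monitor.connect(String.format("ipc://%s-monitor.ipc", self));
        while (true) {
            client.send("HELLO", 0);
            String reply = client.recvStr();
            if (reply == null)
                break;  //  Interrupted
            //  Printable output goes to the broker's monitor, not stdout
            monitor.send(String.format("Client got: %s", reply), 0);
        }
        ctx.destroy();
    }
}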

Example 23 with ZMsg

Use of org.zeromq.ZMsg in project jeromq by zeromq.

The class spqueue, method main.

public static void main(String[] args) {
    ZContext ctx = new ZContext();
    Socket frontend = ctx.createSocket(ZMQ.ROUTER);
    Socket backend = ctx.createSocket(ZMQ.ROUTER);
    //  For clients
    frontend.bind("tcp://*:5555");
    //  For workers
    backend.bind("tcp://*:5556");
    //  Queue of available workers
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();
    Poller poller = ctx.createPoller(2);
    poller.register(backend, Poller.POLLIN);
    poller.register(frontend, Poller.POLLIN);
    //  The body of this example is exactly the same as lruqueue2.
    while (true) {
        boolean workersAvailable = workers.size() > 0;
        int rc = poller.poll(-1);
        //  Poll frontend only if we have available workers
        if (rc == -1)
            //  Interrupted
            break;
        //  Handle worker activity on backend
        if (poller.pollin(0)) {
            //  Use worker address for LRU routing
            ZMsg msg = ZMsg.recvMsg(backend);
            if (msg == null)
                //  Interrupted
                break;
            ZFrame address = msg.unwrap();
            workers.add(address);
            //  Forward message to client if it's not a READY
            ZFrame frame = msg.getFirst();
            if (new String(frame.getData(), ZMQ.CHARSET).equals(WORKER_READY))
                msg.destroy();
            else
                msg.send(frontend);
        }
        if (workersAvailable && poller.pollin(1)) {
            //  Get client request, route to first available worker
            ZMsg msg = ZMsg.recvMsg(frontend);
            if (msg != null) {
                msg.wrap(workers.remove(0));
                msg.send(backend);
            }
        }
    }
    //  When we're done, clean up properly
    while (workers.size() > 0) {
        ZFrame frame = workers.remove(0);
        frame.destroy();
    }
    workers.clear();
    ctx.destroy();
}
Also used: ZFrame (org.zeromq.ZFrame), ArrayList (java.util.ArrayList), ZContext (org.zeromq.ZContext), ZMsg (org.zeromq.ZMsg), Socket (org.zeromq.ZMQ.Socket), Poller (org.zeromq.ZMQ.Poller)
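
A client for this queue simply connects a REQ socket to the frontend and talks to it as if it were a single server. A minimal blocking sketch, assuming the tcp://localhost:5555 endpoint above; the class name is made up, and the zguide actually pairs this queue with the retrying client shown after the next example:

import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import org.zeromq.ZMQ.Socket;

public class spclient {
    public static void main(String[] args) {
        ZContext ctx = new ZContext();
        Socket client = ctx.createSocket(ZMQ.REQ);
        client.connect("tcp://localhost:5555");
        //  The queue routes this to the least recently used worker
        client.send("HELLO", 0);
        String reply = client.recvStr();
        System.out.printf("I: received: %s\n", reply);
        ctx.destroy();
    }
}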

Example 24 with ZMsg

Use of org.zeromq.ZMsg in project jeromq by zeromq.

The class spworker, method main.

public static void main(String[] args) throws Exception {
    ZContext ctx = new ZContext();
    Socket worker = ctx.createSocket(ZMQ.REQ);
    //  Set random identity to make tracing easier
    Random rand = new Random(System.nanoTime());
    String identity = String.format("%04X-%04X", rand.nextInt(0x10000), rand.nextInt(0x10000));
    worker.setIdentity(identity.getBytes(ZMQ.CHARSET));
    worker.connect("tcp://localhost:5556");
    //  Tell broker we're ready for work
    System.out.printf("I: (%s) worker ready\n", identity);
    ZFrame frame = new ZFrame(WORKER_READY);
    frame.send(worker, 0);
    int cycles = 0;
    while (true) {
        ZMsg msg = ZMsg.recvMsg(worker);
        if (msg == null)
            //  Interrupted
            break;
        //  Simulate various problems, after a few cycles
        cycles++;
        if (cycles > 3 && rand.nextInt(5) == 0) {
            System.out.printf("I: (%s) simulating a crash\n", identity);
            msg.destroy();
            break;
        } else if (cycles > 3 && rand.nextInt(5) == 0) {
            System.out.printf("I: (%s) simulating CPU overload\n", identity);
            Thread.sleep(3000);
        }
        System.out.printf("I: (%s) normal reply\n", identity);
        //  Do some heavy work
        Thread.sleep(1000);
        msg.send(worker);
    }
    ctx.destroy();
}
Also used: ZFrame (org.zeromq.ZFrame), Random (java.util.Random), ZContext (org.zeromq.ZContext), ZMsg (org.zeromq.ZMsg), Socket (org.zeromq.ZMQ.Socket)
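
Because this worker deliberately crashes and stalls, a blocking client would hang. The zguide pairs the Simple Pirate queue and worker with a Lazy Pirate client that polls with a timeout and recreates its REQ socket on failure; a sketch of that idea, with assumed constants and class name:

import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import org.zeromq.ZMQ.Poller;
import org.zeromq.ZMQ.Socket;

public class retryclient {
    private static final int REQUEST_TIMEOUT = 2500;  //  msecs
    private static final int REQUEST_RETRIES = 3;     //  Before we abandon

    public static void main(String[] args) {
        ZContext ctx = new ZContext();
        Socket client = ctx.createSocket(ZMQ.REQ);
        client.connect("tcp://localhost:5555");
        Poller poller = ctx.createPoller(1);
        poller.register(client, Poller.POLLIN);

        int retriesLeft = REQUEST_RETRIES;
        while (retriesLeft > 0 && !Thread.currentThread().isInterrupted()) {
            client.send("HELLO", 0);
            int rc = poller.poll(REQUEST_TIMEOUT);
            if (rc == -1)
                break;  //  Interrupted
            if (poller.pollin(0)) {
                System.out.printf("I: server replied: %s\n", client.recvStr());
                break;  //  Success
            }
            //  No reply in time: a REQ socket cannot just send again, so
            //  close it, open a fresh one, and retry
            retriesLeft--;
            poller.unregister(client);
            ctx.destroySocket(client);
            client = ctx.createSocket(ZMQ.REQ);
            client.connect("tcp://localhost:5555");
            poller.register(client, Poller.POLLIN);
        }
        ctx.destroy();
    }
}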

Example 25 with ZMsg

Use of org.zeromq.ZMsg in project jeromq by zeromq.

The class flserver2, method main.

public static void main(String[] args) {
    if (args.length < 1) {
        System.out.printf("I: syntax: flserver2 <endpoint>\n");
        System.exit(0);
    }
    ZContext ctx = new ZContext();
    Socket server = ctx.createSocket(ZMQ.REP);
    server.bind(args[0]);
    System.out.printf("I: echo service is ready at %s\n", args[0]);
    while (true) {
        ZMsg request = ZMsg.recvMsg(server);
        if (request == null)
            //  Interrupted
            break;
        //  Fail nastily if run against wrong client
        assert (request.size() == 2);
        ZFrame identity = request.pop();
        request.destroy();
        ZMsg reply = new ZMsg();
        reply.add(identity);
        reply.add("OK");
        reply.send(server);
    }
    if (Thread.currentThread().isInterrupted())
        System.out.printf("W: interrupted\n");
    ctx.destroy();
}
Also used: ZFrame (org.zeromq.ZFrame), ZContext (org.zeromq.ZContext), ZMsg (org.zeromq.ZMsg), Socket (org.zeromq.ZMQ.Socket)
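
flserver2 is the second freelance-pattern server; its client side talks to one or more of these REP servers over a DEALER socket, which means the client must add the empty delimiter frame itself. The two frames left after REP strips that delimiter (a sequence number and the request body) are exactly what the assert above expects. A minimal one-shot sketch; the endpoint and class name are assumptions:

import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import org.zeromq.ZMQ.Socket;
import org.zeromq.ZMsg;

public class flclientsketch {
    public static void main(String[] args) {
        ZContext ctx = new ZContext();
        Socket client = ctx.createSocket(ZMQ.DEALER);
        client.connect("tcp://localhost:5555");
        //  Frame 1: empty delimiter (stripped by REP), frame 2: sequence
        //  number, frame 3: request body
        ZMsg request = new ZMsg();
        request.add("");
        request.add("1");
        request.add("HELLO");
        request.send(client);
        //  Reply comes back as [empty][sequence]["OK"]
        ZMsg reply = ZMsg.recvMsg(client);
        if (reply != null) {
            reply.pop();  //  Discard empty delimiter
            String sequence = reply.popString();
            String body = reply.popString();
            System.out.printf("I: reply %s: %s\n", sequence, body);
            reply.destroy();
        }
        ctx.destroy();
    }
}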

Aggregations

ZMsg (org.zeromq.ZMsg): 38
ZFrame (org.zeromq.ZFrame): 20
ZContext (org.zeromq.ZContext): 14
Socket (org.zeromq.ZMQ.Socket): 14
Poller (org.zeromq.ZMQ.Poller): 10
ArrayList (java.util.ArrayList): 4
Random (java.util.Random): 4
ZMQ (org.zeromq.ZMQ): 4
IOException (java.io.IOException): 3
File (java.io.File): 2
RandomAccessFile (java.io.RandomAccessFile): 2
BufferedWriter (java.io.BufferedWriter): 1
DataInputStream (java.io.DataInputStream): 1
DataOutputStream (java.io.DataOutputStream): 1
FileInputStream (java.io.FileInputStream): 1
FileNotFoundException (java.io.FileNotFoundException): 1
FileOutputStream (java.io.FileOutputStream): 1
FileWriter (java.io.FileWriter): 1
LinkedList (java.util.LinkedList): 1
PollItem (org.zeromq.ZMQ.PollItem): 1