Use of org.zeromq.ZMQ.Socket in project jeromq by zeromq.
The class titanic, method main.
// Worker task
// This is the main thread for the Titanic worker. It starts three child
// threads, one each for the request, reply, and close services. It then
// dispatches requests to workers using a simple brute-force disk queue. It
// receives request UUIDs from the titanic.request service, saves these to a
// disk file, and then throws each request at MDP workers until it gets a
// response.
public static void main(String[] args) {
    boolean verbose = (args.length > 0 && "-v".equals(args[0]));
    ZContext ctx = new ZContext();

    Socket requestPipe = ZThread.fork(ctx, new TitanicRequest());
    ZThread.start(new TitanicReply());
    ZThread.start(new TitanicClose());

    Poller poller = ctx.createPoller(1);
    poller.register(requestPipe, ZMQ.Poller.POLLIN);

    // Main dispatcher loop
    while (true) {
        // We'll dispatch once per second, if there's no activity
        int rc = poller.poll(1000);
        if (rc == -1)
            break; // Interrupted

        if (poller.pollin(0)) {
            // Ensure message directory exists
            new File(TITANIC_DIR).mkdirs();

            // Append UUID to queue, prefixed with '-' for pending
            ZMsg msg = ZMsg.recvMsg(requestPipe);
            if (msg == null)
                break; // Interrupted
            String uuid = msg.popString();
            BufferedWriter wfile = null;
            try {
                wfile = new BufferedWriter(new FileWriter(TITANIC_DIR + "/queue", true));
                wfile.write("-" + uuid + "\n");
            }
            catch (IOException e) {
                e.printStackTrace();
                break;
            }
            finally {
                try {
                    if (wfile != null)
                        wfile.close();
                }
                catch (IOException e) {
                }
            }
            msg.destroy();
        }
        // Brute-force dispatcher: each queue entry is one status byte plus a
        // 36-character UUID ("?........-....-....-....-............")
        byte[] entry = new byte[37];
        RandomAccessFile file = null;
        try {
            file = new RandomAccessFile(TITANIC_DIR + "/queue", "rw");
            while (file.read(entry) > 0) {
                // UUID is prefixed with '-' if still waiting
                if (entry[0] == '-') {
                    if (verbose)
                        System.out.printf("I: processing request %s\n",
                                new String(entry, 1, entry.length - 1, ZMQ.CHARSET));
                    if (serviceSuccess(new String(entry, 1, entry.length - 1, ZMQ.CHARSET))) {
                        // Mark queue entry as processed
                        file.seek(file.getFilePointer() - 37);
                        file.writeBytes("+");
                        file.seek(file.getFilePointer() + 36);
                    }
                }
                // Skip end of line, LF or CRLF
                if (file.readByte() == '\r')
                    file.readByte();
                if (Thread.currentThread().isInterrupted())
                    break;
            }
        }
        catch (FileNotFoundException e) {
        }
        catch (IOException e) {
            e.printStackTrace();
        }
        finally {
            if (file != null) {
                try {
                    file.close();
                }
                catch (IOException e) {
                }
            }
        }
    }
}
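The queue file format above is simple enough to inspect outside the worker. Below is a minimal standalone sketch (not part of jeromq) that scans the same queue and counts pending versus processed entries. It assumes TITANIC_DIR resolves to a directory such as .titanic, and that every 37-byte entry (status byte plus 36-character UUID) is newline-terminated, as the writer above guarantees.

import java.io.IOException;
import java.io.RandomAccessFile;

public class QueueScan {
    public static void main(String[] args) throws IOException {
        byte[] entry = new byte[37];
        int pending = 0, processed = 0;
        // ".titanic" is an assumption; substitute your TITANIC_DIR value
        try (RandomAccessFile file = new RandomAccessFile(".titanic/queue", "r")) {
            while (file.read(entry) == entry.length) {
                if (entry[0] == '-')
                    pending++;   // still waiting for a worker response
                else if (entry[0] == '+')
                    processed++; // already answered
                if (file.readByte() == '\r') // skip LF or CRLF
                    file.readByte();
            }
        }
        System.out.printf("pending=%d processed=%d%n", pending, processed);
    }
}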
Use of org.zeromq.ZMQ.Socket in project jeromq by zeromq.
The class wuproxy, method main.
public static void main(String[] args) {
    // Prepare our context and sockets
    Context context = ZMQ.context(1);

    // This is where the weather server sits
    Socket frontend = context.socket(ZMQ.SUB);
    frontend.connect("tcp://192.168.55.210:5556");

    // This is our public endpoint for subscribers
    Socket backend = context.socket(ZMQ.PUB);
    backend.bind("tcp://10.1.1.0:8100");

    // Subscribe to everything
    frontend.subscribe(ZMQ.SUBSCRIPTION_ALL);

    // Run the proxy until the user interrupts us
    ZMQ.proxy(frontend, backend, null);

    frontend.close();
    backend.close();
    context.term();
}
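ZMQ.proxy also accepts an optional third capture socket, which receives a copy of every message the proxy forwards. The sketch below is a hedged variation on the proxy above, not part of the jeromq sources: the class name WuProxyCapture, the inproc://capture endpoint, and the wildcard bind are illustrative choices. A daemon thread drains the capture socket and logs each update.

import org.zeromq.ZMQ;
import org.zeromq.ZMQ.Context;
import org.zeromq.ZMQ.Socket;

public class WuProxyCapture {
    public static void main(String[] args) {
        Context context = ZMQ.context(1);

        Socket frontend = context.socket(ZMQ.SUB);
        frontend.connect("tcp://192.168.55.210:5556");
        frontend.subscribe(ZMQ.SUBSCRIPTION_ALL);

        Socket backend = context.socket(ZMQ.PUB);
        backend.bind("tcp://*:8100");

        // Capture socket: the proxy sends a copy of each forwarded message here
        Socket capture = context.socket(ZMQ.PAIR);
        capture.bind("inproc://capture");

        Socket sink = context.socket(ZMQ.PAIR);
        sink.connect("inproc://capture");
        Thread monitor = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                String update = sink.recvStr(); // weather updates are one frame
                if (update == null)
                    break; // context terminated
                System.out.println("captured: " + update);
            }
        });
        monitor.setDaemon(true);
        monitor.start();

        // Blocks until the context is terminated
        ZMQ.proxy(frontend, backend, capture);

        frontend.close();
        backend.close();
        capture.close();
        sink.close();
        context.term();
    }
}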
Use of org.zeromq.ZMQ.Socket in project jeromq by zeromq.
The class pathosub, method main.
public static void main(String[] args) {
    ZContext context = new ZContext();
    Socket subscriber = context.createSocket(ZMQ.SUB);
    if (args.length == 1)
        subscriber.connect(args[0]);
    else
        subscriber.connect("tcp://localhost:5556");

    Random rand = new Random(System.currentTimeMillis());
    String subscription = String.format("%03d", rand.nextInt(1000));
    subscriber.subscribe(subscription.getBytes(ZMQ.CHARSET));

    while (true) {
        String topic = subscriber.recvStr();
        if (topic == null)
            break;
        String data = subscriber.recvStr();
        assert (topic.equals(subscription));
        System.out.println(data);
    }
    context.destroy();
}
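pathosub needs a matching publisher before it receives anything. A minimal companion sketch (modeled on the guide's pathopub but not copied from it; the class name, default endpoint, and 10 ms cadence are illustrative) publishes a random 3-digit topic per tick, so each of the 1,000 possible subscriptions is eventually matched:

import java.util.Random;
import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import org.zeromq.ZMQ.Socket;

public class PathoPubSketch {
    public static void main(String[] args) throws InterruptedException {
        ZContext context = new ZContext();
        Socket publisher = context.createSocket(ZMQ.PUB);
        if (args.length == 1)
            publisher.connect(args[0]);
        else
            publisher.bind("tcp://*:5556");

        // Give slow joiners a moment to connect before we start publishing
        Thread.sleep(1000);

        Random rand = new Random(System.currentTimeMillis());
        while (!Thread.currentThread().isInterrupted()) {
            // Topic frame first, then the data frame (SNDMORE glues them)
            String topic = String.format("%03d", rand.nextInt(1000));
            publisher.send(topic, ZMQ.SNDMORE);
            publisher.send("Some random update");
            Thread.sleep(10);
        }
        context.destroy();
    }
}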
Use of org.zeromq.ZMQ.Socket in project jeromq by zeromq.
The class peering1, method main.
public static void main(String[] argv) {
    // First argument is this broker's name; other arguments are peers' names
    if (argv.length < 1) {
        System.out.println("syntax: peering1 me {you}\n");
        System.exit(-1);
    }
    String self = argv[0];
    System.out.println(String.format("I: preparing broker at %s\n", self));
    Random rand = new Random(System.nanoTime());
    ZContext ctx = new ZContext();

    // Bind state backend to endpoint
    Socket statebe = ctx.createSocket(ZMQ.PUB);
    statebe.bind(String.format("ipc://%s-state.ipc", self));

    // Connect statefe to all peers
    Socket statefe = ctx.createSocket(ZMQ.SUB);
    statefe.subscribe(ZMQ.SUBSCRIPTION_ALL);
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        System.out.printf("I: connecting to state backend at '%s'\n", peer);
        statefe.connect(String.format("ipc://%s-state.ipc", peer));
    }

    // The main loop sends out status messages to peers, and collects
    // status messages back from peers. The zmq_poll timeout defines
    // our own heartbeat.
    Poller poller = ctx.createPoller(1);
    poller.register(statefe, Poller.POLLIN);
    while (true) {
        // Poll for activity, or 1 second timeout
        int rc = poller.poll(1000);
        if (rc == -1)
            break; // Interrupted

        // Handle incoming status messages
        if (poller.pollin(0)) {
            String peer_name = new String(statefe.recv(0), ZMQ.CHARSET);
            String available = new String(statefe.recv(0), ZMQ.CHARSET);
            System.out.printf("%s - %s workers free\n", peer_name, available);
        }
        else {
            // Send random values for worker availability
            statebe.send(self, ZMQ.SNDMORE);
            statebe.send(String.format("%d", rand.nextInt(10)), 0);
        }
    }
    ctx.destroy();
}
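Each state update is a two-frame message: the sender's name, then its free-worker count. A hedged standalone monitor (not part of the example; the class name StateMonitor and the "broker1" default are illustrative) can subscribe to one broker's state endpoint and print those frames via ZMsg:

import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import org.zeromq.ZMQ.Socket;
import org.zeromq.ZMsg;

public class StateMonitor {
    public static void main(String[] args) {
        String peer = args.length > 0 ? args[0] : "broker1"; // hypothetical default
        ZContext ctx = new ZContext();
        Socket statefe = ctx.createSocket(ZMQ.SUB);
        statefe.subscribe(ZMQ.SUBSCRIPTION_ALL);
        statefe.connect(String.format("ipc://%s-state.ipc", peer));
        while (!Thread.currentThread().isInterrupted()) {
            // Each update is two frames: sender name, then free-worker count
            ZMsg update = ZMsg.recvMsg(statefe);
            if (update == null)
                break; // interrupted
            System.out.printf("%s - %s workers free%n",
                    update.popString(), update.popString());
            update.destroy();
        }
        ctx.destroy();
    }
}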
Use of org.zeromq.ZMQ.Socket in project jeromq by zeromq.
The class peering2, method main.
// The main task begins by setting up its frontend and backend sockets
// and then starting its client and worker tasks:
public static void main(String[] argv) {
    // First argument is this broker's name; other arguments are peers' names
    if (argv.length < 1) {
        System.out.println("syntax: peering2 me {you}");
        System.exit(-1);
    }
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());
    ZContext ctx = new ZContext();

    // Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes(ZMQ.CHARSET));
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));

    // Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes(ZMQ.CHARSET));
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
System.out.printf("I: connecting to cloud forintend at '%s'\n", peer);
        cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }

    // Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));

    // Get user to tell us when we can start
    System.out.println("Press Enter when all brokers are started: ");
    try {
        System.in.read();
    }
    catch (IOException e) {
        e.printStackTrace();
    }

    // Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        new worker_task().start();

    // Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        new client_task().start();

    // Here we handle the request-reply flow. We're using the LRU approach
    // to poll workers at all times, and clients only when there are one or
    // more workers available.

    // Least recently used queue of available workers
    int capacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();

    Poller backends = ctx.createPoller(2);
    backends.register(localbe, Poller.POLLIN);
    backends.register(cloudbe, Poller.POLLIN);

    Poller frontends = ctx.createPoller(2);
    frontends.register(localfe, Poller.POLLIN);
    frontends.register(cloudfe, Poller.POLLIN);

    while (true) {
        // First, route any waiting replies from workers;
        // if we have no workers anyhow, wait indefinitely
        int rc = backends.poll(capacity > 0 ? 1000 : -1);
        if (rc == -1)
            break; // Interrupted

        // Handle reply from local worker
        ZMsg msg = null;
        if (backends.pollin(0)) {
            msg = ZMsg.recvMsg(localbe);
            if (msg == null)
                break; // Interrupted
            ZFrame address = msg.unwrap();
            workers.add(address);
            capacity++;

            // If it's READY, don't route the message any further
            ZFrame frame = msg.getFirst();
            if (new String(frame.getData(), ZMQ.CHARSET).equals(WORKER_READY)) {
                msg.destroy();
                msg = null;
            }
        }
        // Or handle reply from peer broker
        else if (backends.pollin(1)) {
            msg = ZMsg.recvMsg(cloudbe);
            if (msg == null)
                break; // Interrupted
            // We don't use peer broker address for anything
            ZFrame address = msg.unwrap();
            address.destroy();
        }

        // Route reply to cloud if it's addressed to a broker
        for (argn = 1; msg != null && argn < argv.length; argn++) {
            byte[] data = msg.getFirst().getData();
            if (argv[argn].equals(new String(data, ZMQ.CHARSET))) {
                msg.send(cloudfe);
                msg = null;
            }
        }

        // Route reply to client if we still need to
        if (msg != null)
            msg.send(localfe);

        while (capacity > 0) {
            rc = frontends.poll(0);
            assert (rc >= 0);
            int reroutable = 0;

            // We'll do peer brokers first, to prevent starvation
            if (frontends.pollin(1)) {
                msg = ZMsg.recvMsg(cloudfe);
                reroutable = 0;
            }
            else if (frontends.pollin(0)) {
                msg = ZMsg.recvMsg(localfe);
                reroutable = 1;
            }
            else
                break; // No work, go back to backends
            // If the message is re-routable, send it to a random peer 20% of
            // the time (rand.nextInt(5) == 0); normally we'd use cloud status
            // information to make this decision
            if (reroutable != 0 && argv.length > 1 && rand.nextInt(5) == 0) {
                // Route to random broker peer
                int random_peer = rand.nextInt(argv.length - 1) + 1;
                msg.push(argv[random_peer]);
                msg.send(cloudbe);
            }
            else {
                ZFrame frame = workers.remove(0);
                msg.wrap(frame);
                msg.send(localbe);
                capacity--;
            }
        }
    }

    // When we're done, clean up properly
    while (workers.size() > 0) {
        ZFrame frame = workers.remove(0);
        frame.destroy();
    }
    ctx.destroy();
}
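The worker_task and client_task threads started above are not part of this excerpt. Below is a minimal sketch of what they might look like, following the guide's LRU pattern: the worker announces itself with WORKER_READY and then echoes "OK" for each request; the client sends "HELLO" once a second. The frame layouts are assumptions inferred from the routing code above, and the sketch assumes both are static nested classes with access to self and WORKER_READY.

static class worker_task extends Thread {
    @Override
    public void run() {
        ZContext ctx = new ZContext();
        Socket worker = ctx.createSocket(ZMQ.REQ);
        worker.connect(String.format("ipc://%s-localbe.ipc", self));
        // Announce availability so the broker adds us to the LRU queue
        ZFrame frame = new ZFrame(WORKER_READY);
        frame.send(worker, 0);
        while (true) {
            ZMsg msg = ZMsg.recvMsg(worker);
            if (msg == null)
                break; // interrupted
            msg.getLast().reset("OK"); // reply in place, keeping the envelope
            msg.send(worker);
        }
        ctx.destroy();
    }
}

static class client_task extends Thread {
    @Override
    public void run() {
        ZContext ctx = new ZContext();
        Socket client = ctx.createSocket(ZMQ.REQ);
        client.connect(String.format("ipc://%s-localfe.ipc", self));
        while (true) {
            client.send("HELLO", 0);
            String reply = client.recvStr(0);
            if (reply == null)
                break; // interrupted
            System.out.println("Client: " + reply);
            try {
                Thread.sleep(1000);
            }
            catch (InterruptedException e) {
                break;
            }
        }
        ctx.destroy();
    }
}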