Use of org.apache.thrift7.TException in project storm by nathanmarz.
The class DRPCSpout, method nextTuple().
@Override
public void nextTuple() {
    boolean gotRequest = false;
    if (_local_drpc_id == null) {
        for (int i = 0; i < _clients.size(); i++) {
            DRPCInvocationsClient client = _clients.get(i);
            try {
                DRPCRequest req = client.fetchRequest(_function);
                if (req.get_request_id().length() > 0) {
                    Map returnInfo = new HashMap();
                    returnInfo.put("id", req.get_request_id());
                    returnInfo.put("host", client.getHost());
                    returnInfo.put("port", client.getPort());
                    gotRequest = true;
                    _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)),
                            new DRPCMessageId(req.get_request_id(), i));
                    break;
                }
            } catch (TException e) {
                LOG.error("Failed to fetch DRPC result from DRPC server", e);
            }
        }
    } else {
        DistributedRPCInvocations.Iface drpc =
                (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
        // drpc can be null during shutdown of the local DRPC service while the topology is still up
        if (drpc != null) {
            try {
                DRPCRequest req = drpc.fetchRequest(_function);
                if (req.get_request_id().length() > 0) {
                    Map returnInfo = new HashMap();
                    returnInfo.put("id", req.get_request_id());
                    returnInfo.put("host", _local_drpc_id);
                    returnInfo.put("port", 0);
                    gotRequest = true;
                    _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)),
                            new DRPCMessageId(req.get_request_id(), 0));
                }
            } catch (TException e) {
                throw new RuntimeException(e);
            }
        }
    }
    if (!gotRequest) {
        Utils.sleep(1);
    }
}
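The DRPCMessageId attached to each emitted tuple pairs the request id with the index of the client that served it, so the spout can later report a failure back to the right DRPC server. The code below is a hedged sketch rather than the project's own implementation: it assumes DRPCInvocationsClient implements DistributedRPCInvocations.Iface (as ReturnResults further down suggests) and that the invocations interface offers failRequest(id), which also throws TException.

// Hedged sketch, not copied from the project: a message-id holder mirroring the
// two values nextTuple() passes in, plus a matching fail() that routes a failure
// report back to the server that issued the request.
private static class DRPCMessageId {
    final String id;     // DRPC request id, echoed back on ack/fail
    final int index;     // index into _clients (0 in local mode)

    DRPCMessageId(String id, int index) {
        this.id = id;
        this.index = index;
    }
}

@Override
public void fail(Object msgId) {
    DRPCMessageId did = (DRPCMessageId) msgId;
    DistributedRPCInvocations.Iface client = (_local_drpc_id == null)
            ? _clients.get(did.index)
            : (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
    try {
        client.failRequest(did.id);   // assumed API on the invocations interface; throws TException
    } catch (TException e) {
        LOG.error("Failed to report DRPC request failure", e);
    }
}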
Use of org.apache.thrift7.TException in project storm-kestrel by nathanmarz.
The class KestrelThriftSpout, method ack().
public void ack(Object msgId) {
    KestrelSourceId sourceId = (KestrelSourceId) msgId;
    KestrelClientInfo info = _kestrels.get(sourceId.index);
    try {
        if (info.client != null) {
            HashSet xids = new HashSet();
            xids.add(sourceId.id);
            // confirm the completed transaction so Kestrel removes the item from the queue
            info.client.confirm(_queueName, xids);
        }
    } catch (TException e) {
        blacklist(info, e);
    }
}
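The corresponding fail() path would return the item to Kestrel instead of confirming it. This is a hedged sketch, assuming the wrapped Kestrel client exposes abort(queue, xids) as the counterpart to confirm(queue, xids); the blacklisting on TException mirrors ack() above.

// Hypothetical sketch only: aborting the transaction asks Kestrel to put the
// item back on the queue for redelivery.
public void fail(Object msgId) {
    KestrelSourceId sourceId = (KestrelSourceId) msgId;
    KestrelClientInfo info = _kestrels.get(sourceId.index);
    try {
        if (info.client != null) {
            HashSet xids = new HashSet();
            xids.add(sourceId.id);
            info.client.abort(_queueName, xids);   // assumed API; returns the item to the queue
        }
    } catch (TException e) {
        blacklist(info, e);   // same handling as ack(): blacklist the broken client
    }
}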
Use of org.apache.thrift7.TException in project storm by nathanmarz.
The class DRPCClient, method connect().
private void connect() throws TException {
    TSocket socket = new TSocket(host, port);
    if (timeout != null) {
        socket.setTimeout(timeout);
    }
    conn = new TFramedTransport(socket);
    client = new DistributedRPC.Client(new TBinaryProtocol(conn));
    conn.open();
}
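A TException out of the framed transport usually means the socket died, so callers commonly rebuild the connection and retry once. The helper below is a hypothetical sketch, not part of the project: it assumes the conn and client fields set by connect(), and the generated DistributedRPC.Client.execute(function, args) call, which throws TException and DRPCExecutionException.

// Hypothetical retry helper: tear down the broken transport, re-open it via
// connect(), and retry the call a single time.
public String executeWithReconnect(String func, String args)
        throws TException, DRPCExecutionException {
    try {
        return client.execute(func, args);
    } catch (TException e) {
        conn.close();   // transport is likely broken
        connect();      // re-establish the framed transport and client
        return client.execute(func, args);
    }
}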
Use of org.apache.thrift7.TException in project storm by nathanmarz.
The class ReturnResults, method execute().
@Override
public void execute(Tuple input) {
    String result = (String) input.getValue(0);
    String returnInfo = (String) input.getValue(1);
    if (returnInfo != null) {
        Map retMap = (Map) JSONValue.parse(returnInfo);
        final String host = (String) retMap.get("host");
        final int port = Utils.getInt(retMap.get("port"));
        String id = (String) retMap.get("id");
        DistributedRPCInvocations.Iface client;
        if (local) {
            client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(host);
        } else {
            // invocation clients are cached per DRPC server, keyed by the [host, port] pair
            List server = new ArrayList() {
                {
                    add(host);
                    add(port);
                }
            };
            if (!_clients.containsKey(server)) {
                _clients.put(server, new DRPCInvocationsClient(host, port));
            }
            client = _clients.get(server);
        }
        try {
            client.result(id, result);
            _collector.ack(input);
        } catch (TException e) {
            LOG.error("Failed to return results to DRPC server", e);
            _collector.fail(input);
        }
    }
}
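ReturnResults consumes a two-field tuple: field 0 is the computed result and field 1 is the opaque returnInfo JSON that DRPCSpout emitted. A minimal sketch of an upstream bolt that would feed it is shown below; the class and field names are illustrative assumptions, while BaseBasicBolt and the tuple classes come from the same storm codebase (backtype.storm.topology.* and backtype.storm.tuple.*).

// Illustrative sketch only: field positions must match getValue(0)/getValue(1)
// in ReturnResults, and the returnInfo JSON is passed through untouched.
public class AppendExclamationBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String args = input.getString(0);        // original DRPC arguments from DRPCSpout
        String returnInfo = input.getString(1);  // routing info for ReturnResults
        collector.emit(new Values(args + "!", returnInfo));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("result", "return-info"));
    }
}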
Use of org.apache.thrift7.TException in project storm by nathanmarz.
The class StormSubmitter, method submitTopology().
/**
 * Submits a topology to run on the cluster. A topology runs forever or until
 * explicitly killed.
 *
 * @param name the name of the topology.
 * @param stormConf the topology-specific configuration. See {@link Config}.
 * @param topology the processing to execute.
 * @param opts options to manipulate the starting of the topology.
 * @throws AlreadyAliveException if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 */
public static void submitTopology(String name, Map stormConf, StormTopology topology, SubmitOptions opts)
        throws AlreadyAliveException, InvalidTopologyException {
    if (!Utils.isValidConf(stormConf)) {
        throw new IllegalArgumentException("Storm conf is not valid. Must be json-serializable");
    }
    stormConf = new HashMap(stormConf);
    stormConf.putAll(Utils.readCommandLineOpts());
    Map conf = Utils.readStormConfig();
    conf.putAll(stormConf);
    try {
        String serConf = JSONValue.toJSONString(stormConf);
        if (localNimbus != null) {
            LOG.info("Submitting topology " + name + " in local mode");
            localNimbus.submitTopology(name, null, serConf, topology);
        } else {
            NimbusClient client = NimbusClient.getConfiguredClient(conf);
            if (topologyNameExists(conf, name)) {
                throw new RuntimeException("Topology with name `" + name + "` already exists on cluster");
            }
            submitJar(conf);
            try {
                LOG.info("Submitting topology " + name + " in distributed mode with conf " + serConf);
                if (opts != null) {
                    client.getClient().submitTopologyWithOpts(name, submittedJar, serConf, topology, opts);
                } else {
                    // this is for backwards compatibility
                    client.getClient().submitTopology(name, submittedJar, serConf, topology);
                }
            } catch (InvalidTopologyException e) {
                LOG.warn("Topology submission exception", e);
                throw e;
            } catch (AlreadyAliveException e) {
                LOG.warn("Topology already alive exception", e);
                throw e;
            } finally {
                client.close();
            }
        }
        LOG.info("Finished submitting topology: " + name);
    } catch (TException e) {
        throw new RuntimeException(e);
    }
}
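A typical call site for the method above might look like the following sketch. MyWordSpout and MyCountBolt are placeholder components, while TopologyBuilder, Config, and StormSubmitter come from the same codebase; passing null for SubmitOptions exercises the backwards-compatible branch shown above, and any TException from Nimbus surfaces as a RuntimeException.

// Illustrative submission sketch; MyWordSpout and MyCountBolt are hypothetical.
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new MyWordSpout(), 4);
    builder.setBolt("counts", new MyCountBolt(), 8).shuffleGrouping("words");

    Config conf = new Config();
    conf.setNumWorkers(2);

    // No SubmitOptions: takes the submitTopology(...) branch inside the method above.
    StormSubmitter.submitTopology("word-count", conf, builder.createTopology(), null);
}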