Use of net.i2p.data.i2np.TunnelGatewayMessage in project i2p.i2p by i2p.
Class BuildHandler, method handleReq().
/**
* If we are dropping lots of requests before even trying to handle them,
* I suppose you could call us "overloaded"
*/
/**
** unused, see handleReq() below
* private final static int MAX_PROACTIVE_DROPS = 240;
*
* private int countProactiveDrops() {
* int dropped = 0;
* dropped += countEvents("tunnel.dropLoadProactive", 60*1000);
* dropped += countEvents("tunnel.dropLoad", 60*1000);
* dropped += countEvents("tunnel.dropLoadBacklog", 60*1000);
* dropped += countEvents("tunnel.dropLoadDelay", 60*1000);
* return dropped;
* }
*
* private int countEvents(String stat, long period) {
* RateStat rs = _context.statManager().getRate(stat);
* if (rs != null) {
* Rate r = rs.getRate(period);
* if (r != null)
* return (int)r.getCurrentEventCount();
* }
* return 0;
* }
***
*/
/**
* Actually process the request and send the reply.
*
* Todo: Replies are not subject to RED for bandwidth reasons,
* and the bandwidth is not credited to any tunnel.
* If we did credit the reply to the tunnel, it would
* prevent the classification of the tunnel as 'inactive' on tunnels.jsp.
*/
private void handleReq(RouterInfo nextPeerInfo, BuildMessageState state, BuildRequestRecord req, Hash nextPeer) {
long ourId = req.readReceiveTunnelId();
long nextId = req.readNextTunnelId();
boolean isInGW = req.readIsInboundGateway();
boolean isOutEnd = req.readIsOutboundEndpoint();
Hash from = state.fromHash;
if (from == null && state.from != null)
from = state.from.calculateHash();
if (isInGW && isOutEnd) {
_context.statManager().addRateData("tunnel.rejectHostile", 1);
_log.error("Dropping build request, IBGW+OBEP: " + req);
if (from != null)
_context.commSystem().mayDisconnect(from);
return;
}
if (ourId <= 0 || ourId > TunnelId.MAX_ID_VALUE || nextId <= 0 || nextId > TunnelId.MAX_ID_VALUE) {
_context.statManager().addRateData("tunnel.rejectHostile", 1);
if (_log.shouldWarn())
_log.warn("Dropping build request, bad tunnel ID: " + req);
if (from != null)
_context.commSystem().mayDisconnect(from);
return;
}
// Loop checks
if ((!isOutEnd) && _context.routerHash().equals(nextPeer)) {
_context.statManager().addRateData("tunnel.rejectHostile", 1);
// old i2pd
if (_log.shouldWarn())
_log.warn("Dropping build request, we are the next hop: " + req);
if (from != null)
_context.commSystem().mayDisconnect(from);
return;
}
if (!isInGW) {
// a non-IBGW hop must have a known previous hop that is not ourselves; otherwise something is seriously wrong here.
if (from == null || _context.routerHash().equals(from)) {
_context.statManager().addRateData("tunnel.rejectHostile", 1);
if (_log.shouldWarn())
_log.warn("Dropping build request, we are the previous hop: " + req);
return;
}
}
if ((!isOutEnd) && (!isInGW)) {
// A-B-C-A is not preventable
if (nextPeer.equals(from)) {
// i2pd does this
_context.statManager().addRateData("tunnel.rejectHostile", 1);
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping build request with the same previous and next hop: " + req);
_context.commSystem().mayDisconnect(from);
return;
}
}
// time is in hours, rounded down.
// tunnel-alt-creation.html specifies that this is enforced +/- 1 hour but it was not.
// As of 0.9.16, allow + 5 minutes to - 65 minutes.
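// Worked example of the +5/-65 minute window above: both timestamps are truncated to the hour,
// so timeDiff is always a whole number of hours. A request stamped with the previous hour gives
// timeDiff = +60 min and passes the age check; one stamped with the next hour gives
// timeDiff = -60 min and is rejected below as too far in the future.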
long time = req.readRequestTime();
long now = (_context.clock().now() / (60l * 60l * 1000l)) * (60 * 60 * 1000);
long timeDiff = now - time;
if (timeDiff > MAX_REQUEST_AGE) {
_context.statManager().addRateData("tunnel.rejectTooOld", 1);
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping build request too old... replay attack? " + DataHelper.formatDuration(timeDiff) + ": " + req);
if (from != null)
_context.commSystem().mayDisconnect(from);
return;
}
if (timeDiff < 0 - MAX_REQUEST_FUTURE) {
_context.statManager().addRateData("tunnel.rejectFuture", 1);
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping build request too far in future " + DataHelper.formatDuration(0 - timeDiff) + ": " + req);
if (from != null)
_context.commSystem().mayDisconnect(from);
return;
}
int response;
if (_context.router().isHidden()) {
_context.throttle().setTunnelStatus(_x("Rejecting tunnels: Hidden mode"));
response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
} else {
response = _context.throttle().acceptTunnelRequest();
}
// This only checked OUR tunnels, so the log message was wrong.
// Now checked by TunnelDispatcher.joinXXX()
// and returned as success value, checked below.
// if (_context.tunnelManager().getTunnelInfo(new TunnelId(ourId)) != null) {
// if (_log.shouldLog(Log.ERROR))
// _log.error("Already participating in a tunnel with the given Id (" + ourId + "), so gotta reject");
// if (response == 0)
// response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
// }
// if ( (response == 0) && (_context.random().nextInt(50) <= 1) )
// response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
long recvDelay = _context.clock().now() - state.recvTime;
if (response == 0) {
// unused
// int proactiveDrops = countProactiveDrops();
float pDrop = ((float) recvDelay) / (float) (BuildRequestor.REQUEST_TIMEOUT * 3);
pDrop = (float) Math.pow(pDrop, 16);
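// With the 16th power, pDrop stays negligible until recvDelay approaches the full 3x
// REQUEST_TIMEOUT window: at 1/3 of the window pDrop ~= 2e-8, at 2/3 ~= 0.0015, and it
// only reaches 1.0 when the delay spans the whole window.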
if (_context.random().nextFloat() < pDrop) {
// || (proactiveDrops > MAX_PROACTIVE_DROPS) ) ) {
_context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay);
_context.throttle().setTunnelStatus(_x("Rejecting tunnels: Request overload"));
// if (true || (proactiveDrops < MAX_PROACTIVE_DROPS*2))
response = TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
// else
// response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
} else {
_context.statManager().addRateData("tunnel.acceptLoad", recvDelay);
}
}
/*
* Being a IBGW or OBEP generally leads to more connections, so if we are
* approaching our connection limit (i.e. !haveCapacity()),
* reject this request.
*
* Don't do this for class N or higher (N, O, P, X), under the assumption that they are
* already talking to most of the routers, so there's no reason to reject. This may drive them
* to their conn. limits, but it's hopefully a temporary solution to the
* tunnel build congestion. As the net grows this will have to be revisited.
*/
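// Assumed semantics of haveInboundCapacity(87) / haveOutboundCapacity(87): true while the
// relevant connection count is below roughly 87% of its limit, consistent with the 81% and
// 75% thresholds referenced near the rejection path below.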
RouterInfo ri = _context.router().getRouterInfo();
if (response == 0) {
if (ri == null) {
// ?? We should always have a RI
response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
} else {
char bw = ri.getBandwidthTier().charAt(0);
if (bw != 'O' && bw != 'N' && bw != 'P' && bw != 'X' && ((isInGW && !_context.commSystem().haveInboundCapacity(87)) || (isOutEnd && !_context.commSystem().haveOutboundCapacity(87)))) {
_context.statManager().addRateData("tunnel.rejectConnLimits", 1);
_context.throttle().setTunnelStatus(_x("Rejecting tunnels: Connection limit"));
response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
}
}
}
// We may need another counter above for requests.
if (response == 0 && !isInGW) {
if (from != null && _throttler.shouldThrottle(from)) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting tunnel (hop throttle), previous hop: " + from + ": " + req);
// no setTunnelStatus() indication
_context.statManager().addRateData("tunnel.rejectHopThrottle", 1);
response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
}
}
if (response == 0 && (!isOutEnd) && _throttler.shouldThrottle(nextPeer)) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting tunnel (hop throttle), next hop: " + req);
_context.statManager().addRateData("tunnel.rejectHopThrottle", 1);
// no setTunnelStatus() indication
response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
}
HopConfig cfg = null;
if (response == 0) {
cfg = new HopConfig();
cfg.setCreation(_context.clock().now());
cfg.setExpiration(_context.clock().now() + 10 * 60 * 1000);
cfg.setIVKey(req.readIVKey());
cfg.setLayerKey(req.readLayerKey());
if (isInGW) {
// default
// cfg.setReceiveFrom(null);
} else {
if (from != null) {
cfg.setReceiveFrom(from);
} else {
// b0rk - should be unreachable; the !isInGW check above already rejected a null previous hop
return;
}
}
cfg.setReceiveTunnelId(DataHelper.toLong(4, ourId));
if (isOutEnd) {
// default
// cfg.setSendTo(null);
// cfg.setSendTunnelId(null);
} else {
cfg.setSendTo(nextPeer);
cfg.setSendTunnelId(DataHelper.toLong(4, nextId));
}
// now "actually" join
boolean success;
if (isOutEnd)
success = _context.tunnelDispatcher().joinOutboundEndpoint(cfg);
else if (isInGW)
success = _context.tunnelDispatcher().joinInboundGateway(cfg);
else
success = _context.tunnelDispatcher().joinParticipant(cfg);
if (success) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Joining: " + req);
} else {
// Dup Tunnel ID. This can definitely happen (birthday paradox).
// Probability in 11 minutes (per hop type):
// 0.1% for 2900 tunnels; 1% for 9300 tunnels
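// Those figures match the standard birthday approximation for ~2^32 possible 4-byte
// tunnel IDs: P(collision) ~= n^2 / 2^33, i.e. ~0.1% for n = 2900 and ~1% for n = 9300.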
response = TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
_context.statManager().addRateData("tunnel.rejectDupID", 1);
if (_log.shouldLog(Log.WARN))
_log.warn("DUP ID failure: " + req);
}
}
if (response != 0) {
_context.statManager().addRateData("tunnel.reject." + response, 1);
_context.messageHistory().tunnelRejected(from, new TunnelId(ourId), nextPeer, // (isOutEnd ? "outbound endpoint" : isInGW ? "inbound gw" : "participant"));
Integer.toString(response));
if (from != null)
_context.commSystem().mayDisconnect(from);
// 81% = between 75% control measures in Transports and 87% rejection above
if ((!_context.routerHash().equals(nextPeer)) && (!_context.commSystem().haveOutboundCapacity(81)) && (!_context.commSystem().isEstablished(nextPeer))) {
_context.statManager().addRateData("tunnel.dropConnLimits", 1);
if (_log.shouldLog(Log.WARN))
_log.warn("Not sending rejection due to conn limits: " + req);
return;
}
} else if (isInGW && from != null) {
// we're the start of the tunnel, no use staying connected
_context.commSystem().mayDisconnect(from);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Responding to " + state.msg.getUniqueId() + " after " + recvDelay + " with " + response + " from " + (from != null ? from : "tunnel") + ": " + req);
EncryptedBuildRecord reply = BuildResponseRecord.create(_context, response, req.readReplyKey(), req.readReplyIV(), state.msg.getUniqueId());
int records = state.msg.getRecordCount();
int ourSlot = -1;
for (int j = 0; j < records; j++) {
if (state.msg.getRecord(j) == null) {
ourSlot = j;
state.msg.setRecord(j, reply);
// + ": " + Base64.encode(reply));
break;
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Read slot " + ourSlot + " containing: " + req + " accepted? " + response + " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId());
// now actually send the response
long expires = _context.clock().now() + NEXT_HOP_SEND_TIMEOUT;
if (!isOutEnd) {
state.msg.setUniqueId(req.readReplyMessageId());
state.msg.setMessageExpiration(expires);
OutNetMessage msg = new OutNetMessage(_context, state.msg, expires, PRIORITY, nextPeerInfo);
if (response == 0)
msg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg));
_context.outNetMessagePool().add(msg);
} else {
// We are the OBEP.
// send it to the reply tunnel on the reply peer within a new TunnelBuildReplyMessage
// (enough layers jrandom?)
TunnelBuildReplyMessage replyMsg;
if (records == TunnelBuildMessage.MAX_RECORD_COUNT)
replyMsg = new TunnelBuildReplyMessage(_context);
else
replyMsg = new VariableTunnelBuildReplyMessage(_context, records);
for (int i = 0; i < records; i++) replyMsg.setRecord(i, state.msg.getRecord(i));
replyMsg.setUniqueId(req.readReplyMessageId());
replyMsg.setMessageExpiration(expires);
TunnelGatewayMessage m = new TunnelGatewayMessage(_context);
m.setMessage(replyMsg);
m.setMessageExpiration(expires);
m.setTunnelId(new TunnelId(nextId));
if (_context.routerHash().equals(nextPeer)) {
// ok, we are the gateway, so inject it
if (_log.shouldLog(Log.DEBUG))
_log.debug("We are the reply gateway for " + nextId + " when replying to replyMessage " + req);
_context.tunnelDispatcher().dispatch(m);
} else {
// ok, the gateway is some other peer, shove 'er across
OutNetMessage outMsg = new OutNetMessage(_context, m, expires, PRIORITY, nextPeerInfo);
if (response == 0)
outMsg.setOnFailedSendJob(new TunnelBuildNextHopFailJob(_context, cfg));
_context.outNetMessagePool().add(outMsg);
}
}
}
Use of net.i2p.data.i2np.TunnelGatewayMessage in project i2p.i2p by i2p.
Class HandleFloodfillDatabaseStoreMessageJob, method sendAck().
private void sendAck(Hash storedKey) {
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setMessageId(_message.getReplyToken());
// Randomize for a little protection against clock-skew fingerprinting.
// But the "arrival" isn't used for anything, right?
// TODO just set to 0?
// TODO we have no session to garlic wrap this with, needs new message
msg.setArrival(getContext().clock().now() - getContext().random().nextInt(3 * 1000));
// may be null
TunnelId replyTunnel = _message.getReplyTunnel();
// A store of our own RI, only if we are not FF
DatabaseStoreMessage msg2;
if ((getContext().netDb().floodfillEnabled() && !getContext().router().gracefulShutdownInProgress()) || storedKey.equals(getContext().routerHash())) {
// don't send our RI if the store was our RI (from PeerTestJob)
msg2 = null;
} else {
// we aren't ff, send a go-away message
msg2 = new DatabaseStoreMessage(getContext());
RouterInfo me = getContext().router().getRouterInfo();
msg2.setEntry(me);
if (_log.shouldWarn())
_log.warn("Got a store w/ reply token, but we aren't ff: from: " + _from + " fromHash: " + _fromHash + " msg: " + _message, new Exception());
}
Hash toPeer = _message.getReplyGateway();
boolean toUs = getContext().routerHash().equals(toPeer);
// Reply routing: if we are the reply gateway, inject into the reply tunnel; if the reply is
// to us or we are already connected to the gateway, send directly; else through an exploratory tunnel.
if (toUs && replyTunnel != null) {
// if we are the gateway, act as if we received it
TunnelGatewayMessage tgm = new TunnelGatewayMessage(getContext());
tgm.setMessage(msg);
tgm.setTunnelId(replyTunnel);
tgm.setMessageExpiration(msg.getMessageExpiration());
getContext().tunnelDispatcher().dispatch(tgm);
if (msg2 != null) {
TunnelGatewayMessage tgm2 = new TunnelGatewayMessage(getContext());
tgm2.setMessage(msg2);
tgm2.setTunnelId(replyTunnel);
tgm2.setMessageExpiration(msg.getMessageExpiration());
getContext().tunnelDispatcher().dispatch(tgm2);
}
} else if (toUs || getContext().commSystem().isEstablished(toPeer)) {
Job send = new SendMessageDirectJob(getContext(), msg, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send.runJob();
if (msg2 != null) {
Job send2 = new SendMessageDirectJob(getContext(), msg2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send2.runJob();
}
} else {
// pick tunnel with endpoint closest to toPeer
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(toPeer);
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnel could be found");
return;
}
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), replyTunnel, toPeer);
if (msg2 != null)
getContext().tunnelDispatcher().dispatchOutbound(msg2, outTunnel.getSendTunnelId(0), replyTunnel, toPeer);
}
}
Use of net.i2p.data.i2np.TunnelGatewayMessage in project i2p.i2p by i2p.
Class HandleGarlicMessageJob, method handleClove().
public void handleClove(DeliveryInstructions instructions, I2NPMessage data) {
switch(instructions.getDeliveryMode()) {
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
if (_log.shouldLog(Log.DEBUG))
_log.debug("local delivery instructions for clove: " + data);
getContext().inNetMessagePool().add(data, null, null);
return;
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
if (_log.shouldLog(Log.ERROR))
_log.error("this message didn't come down a tunnel, not forwarding to a destination: " + instructions + " - " + data);
return;
case DeliveryInstructions.DELIVERY_MODE_ROUTER:
if (getContext().routerHash().equals(instructions.getRouter())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("router delivery instructions targetting us");
getContext().inNetMessagePool().add(data, null, null);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("router delivery instructions targetting " + instructions.getRouter().toBase64().substring(0, 4) + " for " + data);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), data, instructions.getRouter(), 10 * 1000, ROUTER_PRIORITY);
// run it inline (adds to the outNetPool if it has the router info, otherwise queue a lookup)
j.runJob();
// getContext().jobQueue().addJob(j);
}
return;
case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
TunnelGatewayMessage gw = new TunnelGatewayMessage(getContext());
gw.setMessage(data);
gw.setTunnelId(instructions.getTunnelId());
gw.setMessageExpiration(data.getMessageExpiration());
if (_log.shouldLog(Log.DEBUG))
_log.debug("tunnel delivery instructions targetting " + instructions.getRouter().toBase64().substring(0, 4) + " for " + data);
SendMessageDirectJob job = new SendMessageDirectJob(getContext(), gw, instructions.getRouter(), 10 * 1000, TUNNEL_PRIORITY);
// run it inline (adds to the outNetPool if it has the router info, otherwise queue a lookup)
job.runJob();
// getContext().jobQueue().addJob(job);
return;
default:
_log.error("Unknown instruction " + instructions.getDeliveryMode() + ": " + instructions);
return;
}
}
Use of net.i2p.data.i2np.TunnelGatewayMessage in project i2p.i2p by i2p.
Class OutboundMessageDistributor, method distribute().
private void distribute(I2NPMessage msg, RouterInfo target, TunnelId tunnel) {
I2NPMessage m = msg;
if (tunnel != null) {
TunnelGatewayMessage t = new TunnelGatewayMessage(_context);
t.setMessage(msg);
t.setTunnelId(tunnel);
t.setMessageExpiration(m.getMessageExpiration());
m = t;
}
if (_context.routerHash().equals(target.getIdentity().calculateHash())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("queueing inbound message to ourselves: " + m);
// TODO if UnknownI2NPMessage, convert it.
// See FragmentHandler.receiveComplete()
_context.inNetMessagePool().add(m, null, null);
return;
} else {
OutNetMessage out = new OutNetMessage(_context, m, _context.clock().now() + MAX_DISTRIBUTE_TIME, _priority, target);
if (_log.shouldLog(Log.DEBUG))
_log.debug("queueing outbound message to " + target.getIdentity().calculateHash());
_context.outNetMessagePool().add(out);
}
}
Use of net.i2p.data.i2np.TunnelGatewayMessage in project i2p.i2p by i2p.
Class HandleDatabaseLookupMessageJob, method sendThroughTunnel().
private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
if (getContext().routerHash().equals(toPeer)) {
// if we are the gateway, act as if we received it
TunnelGatewayMessage m = new TunnelGatewayMessage(getContext());
m.setMessage(message);
m.setTunnelId(replyTunnel);
m.setMessageExpiration(message.getMessageExpiration());
getContext().tunnelDispatcher().dispatch(m);
} else {
// if we aren't the gateway, forward it on
if (!_replyKeyConsumed) {
// if we send a followup DSM w/ our RI, don't reuse key
SessionKey replyKey = _message.getReplyKey();
if (replyKey != null) {
// encrypt the reply
if (_log.shouldLog(Log.INFO))
_log.info("Sending encrypted reply to " + toPeer + ' ' + replyKey + ' ' + _message.getReplyTag());
message = MessageWrapper.wrap(getContext(), message, replyKey, _message.getReplyTag());
if (message == null) {
_log.error("Encryption error");
return;
}
_replyKeyConsumed = true;
}
}
TunnelGatewayMessage m = new TunnelGatewayMessage(getContext());
m.setMessage(message);
m.setMessageExpiration(message.getMessageExpiration());
m.setTunnelId(replyTunnel);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), m, toPeer, 10 * 1000, MESSAGE_PRIORITY);
j.runJob();
// getContext().jobQueue().addJob(j);
}
}
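All five call sites above follow the same wrapping pattern: construct a TunnelGatewayMessage around the payload, set the tunnel ID and expiration, then either inject it into the local tunnel dispatcher (when this router is the tunnel's gateway) or hand it to the gateway router. The sketch below distills that pattern; the method name and the ctx, payload, tunnelId, gateway, and priority identifiers are illustrative assumptions rather than code from i2p.i2p, while the 10-second timeout mirrors the call sites above.

// Hypothetical helper, not part of i2p.i2p; distilled from the call sites above.
private void sendViaTunnelGateway(RouterContext ctx, I2NPMessage payload, TunnelId tunnelId, Hash gateway, int priority) {
    // wrap the payload so the gateway can push it into the tunnel
    TunnelGatewayMessage tgm = new TunnelGatewayMessage(ctx);
    tgm.setMessage(payload);
    tgm.setTunnelId(tunnelId);
    tgm.setMessageExpiration(payload.getMessageExpiration());
    if (ctx.routerHash().equals(gateway)) {
        // we are the tunnel gateway ourselves, so inject the message locally
        ctx.tunnelDispatcher().dispatch(tgm);
    } else {
        // the gateway is another router: send the wrapped message to it directly
        SendMessageDirectJob j = new SendMessageDirectJob(ctx, tgm, gateway, 10 * 1000, priority);
        j.runJob();
    }
}

As in HandleGarlicMessageJob and HandleDatabaseLookupMessageJob, runJob() is called inline, so the send is queued immediately if the gateway's RouterInfo is known, otherwise a lookup is queued first.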