Use of net.i2p.data.i2np.DatabaseStoreMessage in project i2p.i2p by i2p.
The class InboundMessageDistributor, method distribute.
    public void distribute(I2NPMessage msg, Hash target, TunnelId tunnel) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("IBMD for " + _client + " to " + target + " / " + tunnel + " : " + msg);

        // allow messages on client tunnels even after client disconnection, as it may
        // include e.g. test messages, etc. DataMessages will be dropped anyway
        /*
        if ( (_client != null) && (!_context.clientManager().isLocal(_client)) ) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Not distributing a message, as it came down a client's tunnel ("
                          + _client.toBase64() + ") after the client disconnected: " + msg);
            return;
        }
        */

        int type = msg.getType();

        // if the message came down a client tunnel:
        if (_client != null) {
            switch (type) {
                case DatabaseSearchReplyMessage.MESSAGE_TYPE:
                    /*
                    DatabaseSearchReplyMessage orig = (DatabaseSearchReplyMessage) msg;
                    if (orig.getNumReplies() > 0) {
                        if (_log.shouldLog(Log.INFO))
                            _log.info("Removing replies from a DSRM down a tunnel for " + _client + ": " + msg);
                        DatabaseSearchReplyMessage newMsg = new DatabaseSearchReplyMessage(_context);
                        newMsg.setFromHash(orig.getFromHash());
                        newMsg.setSearchKey(orig.getSearchKey());
                        msg = newMsg;
                    }
                    */
                    break;
                case DatabaseStoreMessage.MESSAGE_TYPE:
                    DatabaseStoreMessage dsm = (DatabaseStoreMessage) msg;
                    if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
                        // Todo: if peer was ff and RI is not ff, queue for exploration in netdb
                        // (but that isn't part of the facade now)
                        if (_log.shouldLog(Log.WARN))
                            _log.warn("Dropping DSM down a tunnel for " + _client + ": " + msg);
                        // Handle safely by just updating the caps table, after doing basic validation
                        Hash key = dsm.getKey();
                        if (_context.routerHash().equals(key))
                            return;
                        RouterInfo ri = (RouterInfo) dsm.getEntry();
                        if (!key.equals(ri.getIdentity().getHash()))
                            return;
                        if (!ri.isValid())
                            return;
                        RouterInfo oldri = _context.netDb().lookupRouterInfoLocally(key);
                        // only update if RI is newer and non-ff
                        if (oldri != null && oldri.getPublished() < ri.getPublished() &&
                            !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
                            if (_log.shouldLog(Log.WARN))
                                _log.warn("Updating caps for RI " + key + " from \"" + oldri.getCapabilities() +
                                          "\" to \"" + ri.getCapabilities() + '"');
                            _context.peerManager().setCapabilities(key, ri.getCapabilities());
                        }
                        return;
                    } else if (dsm.getReplyToken() != 0) {
                        _context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, type);
                        _log.error("Dropping LS DSM w/ reply token down a tunnel for " + _client + ": " + msg);
                        return;
                    } else {
                        // allow DSM of our own key (used by FloodfillVerifyStoreJob)
                        // or other keys (used by IterativeSearchJob),
                        // as long as there's no reply token (we will never set a reply token but an attacker might)
                        ((LeaseSet) dsm.getEntry()).setReceivedAsReply();
                    }
                    break;
                case DeliveryStatusMessage.MESSAGE_TYPE:
                case GarlicMessage.MESSAGE_TYPE:
                case TunnelBuildReplyMessage.MESSAGE_TYPE:
                case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
                    // these are safe, handled below
                    break;
                default:
                    // drop it, since we should only get the above message types down client tunnels
                    _context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, type);
                    _log.error("Dropped dangerous message down a tunnel for " + _client + ": " + msg, new Exception("cause"));
                    return;
            } // switch
        } else {
            // expl. tunnel
            switch (type) {
                case DatabaseStoreMessage.MESSAGE_TYPE:
                    DatabaseStoreMessage dsm = (DatabaseStoreMessage) msg;
                    if (dsm.getReplyToken() != 0) {
                        _context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
                        _log.error("Dropping DSM w/ reply token down a expl. tunnel: " + msg);
                        return;
                    }
                    if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET)
                        ((LeaseSet) dsm.getEntry()).setReceivedAsReply();
                    break;
                case DatabaseSearchReplyMessage.MESSAGE_TYPE:
                case DeliveryStatusMessage.MESSAGE_TYPE:
                case GarlicMessage.MESSAGE_TYPE:
                case TunnelBuildReplyMessage.MESSAGE_TYPE:
                case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
                    // these are safe, handled below
                    break;
                default:
                    _context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
                    _log.error("Dropped dangerous message down expl tunnel: " + msg, new Exception("cause"));
                    return;
            } // switch
        }
        if ((target == null) || ((tunnel == null) && (_context.routerHash().equals(target)))) {
            // make sure we don't honor any remote requests directly (garlic instructions, etc)
            if (type == GarlicMessage.MESSAGE_TYPE) {
                // in case we're looking for replies to a garlic message (cough load tests cough)
                _context.inNetMessagePool().handleReplies(msg);
                //if (_log.shouldLog(Log.DEBUG))
                //    _log.debug("received garlic message in the tunnel, parse it out");
                _receiver.receive((GarlicMessage) msg);
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("distributing inbound tunnel message into our inNetMessagePool: " + msg);
                _context.inNetMessagePool().add(msg, null, null);
            }
            /*
             **** latency measuring attack?
            } else if (_context.routerHash().equals(target)) {
                // they want to send it to a tunnel, except we are also that tunnel's gateway;
                // dispatch it directly
                if (_log.shouldLog(Log.INFO))
                    _log.info("distributing inbound tunnel message back out, except we are the gateway");
                TunnelGatewayMessage gw = new TunnelGatewayMessage(_context);
                gw.setMessage(msg);
                gw.setTunnelId(tunnel);
                gw.setMessageExpiration(_context.clock().now() + 10*1000);
                gw.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
                _context.tunnelDispatcher().dispatch(gw);
             ****
            */
        } else {
            // ok, they want us to send it remotely, but that'd bust our anonymity,
            // so we send it out a tunnel first
            // TODO use the OCMOSJ cache to pick OB tunnel we are already using?
            TunnelInfo out = _context.tunnelManager().selectOutboundTunnel(_client, target);
            if (out == null) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("no outbound tunnel to send the client message for " + _client + ": " + msg);
                return;
            }
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("distributing IB tunnel msg type " + type + " back out " + out + " targetting " + target);
            TunnelId outId = out.getSendTunnelId(0);
            if (outId == null) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("strange? outbound tunnel has no outboundId? " + out + " failing to distribute " + msg);
                return;
            }
            long exp = _context.clock().now() + 20 * 1000;
            if (msg.getMessageExpiration() < exp)
                msg.setMessageExpiration(exp);
            _context.tunnelDispatcher().dispatchOutbound(msg, outId, tunnel, target);
        }
    }
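
The switch on the message type above is effectively a whitelist: only a handful of I2NP message types are ever accepted from a client tunnel, and everything else is counted and dropped. Below is a minimal standalone sketch of that policy, not part of the i2p source; the class and method names are hypothetical, while the MESSAGE_TYPE constants are the ones used in distribute() above.

    import net.i2p.data.i2np.DatabaseSearchReplyMessage;
    import net.i2p.data.i2np.DatabaseStoreMessage;
    import net.i2p.data.i2np.DeliveryStatusMessage;
    import net.i2p.data.i2np.GarlicMessage;
    import net.i2p.data.i2np.TunnelBuildReplyMessage;
    import net.i2p.data.i2np.VariableTunnelBuildReplyMessage;

    /** Sketch only; not from the i2p source tree. */
    class ClientTunnelPolicySketch {
        /** True if this I2NP message type may be distributed from a client tunnel at all;
            DSM and DSRM remain subject to the extra checks shown in distribute(). */
        static boolean isAllowedDownClientTunnel(int type) {
            switch (type) {
                case DatabaseSearchReplyMessage.MESSAGE_TYPE:
                case DatabaseStoreMessage.MESSAGE_TYPE:
                case DeliveryStatusMessage.MESSAGE_TYPE:
                case GarlicMessage.MESSAGE_TYPE:
                case TunnelBuildReplyMessage.MESSAGE_TYPE:
                case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
                    return true;
                default:
                    return false;
            }
        }
    }
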
Use of net.i2p.data.i2np.DatabaseStoreMessage in project i2p.i2p by i2p.
The class UDPTransport, method messageReceived.
    /*
     * infinite loop
     * public RouterAddress getCurrentAddress() {
     *     if (needsRebuild())
     *         rebuildExternalAddress(false);
     *     return super.getCurrentAddress();
     * }
     */
    @Override
    public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash,
                                long msToReceive, int bytesReceived) {
        if (inMsg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) {
            DatabaseStoreMessage dsm = (DatabaseStoreMessage) inMsg;
            DatabaseEntry entry = dsm.getEntry();
            if (entry == null)
                return;
            if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
                ((RouterInfo) entry).getNetworkId() != _networkID) {
                // this is pre-0.6.1.10, so it isn't going to happen any more
                /*
                if (remoteIdentHash != null) {
                    _context.banlist().banlistRouter(remoteIdentHash, "Sent us a peer from the wrong network");
                    dropPeer(remoteIdentHash);
                    if (_log.shouldLog(Log.ERROR))
                        _log.error("Dropping the peer " + remoteIdentHash
                                   + " because they are in the wrong net");
                } else if (remoteIdent != null) {
                    _context.banlist().banlistRouter(remoteIdent.calculateHash(), "Sent us a peer from the wrong network");
                    dropPeer(remoteIdent.calculateHash());
                    if (_log.shouldLog(Log.ERROR))
                        _log.error("Dropping the peer " + remoteIdent.calculateHash()
                                   + " because they are in the wrong net");
                }
                */
                Hash peerHash = entry.getHash();
                PeerState peer = getPeerState(peerHash);
                if (peer != null) {
                    RemoteHostId remote = peer.getRemoteHostId();
                    _dropList.add(remote);
                    _context.statManager().addRateData("udp.dropPeerDroplist", 1);
                    _context.simpleTimer2().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
                }
                markUnreachable(peerHash);
                _context.banlist().banlistRouter(peerHash, "Part of the wrong network, version = " +
                                                 ((RouterInfo) entry).getVersion());
                //_context.banlist().banlistRouter(peerHash, "Part of the wrong network", STYLE);
                if (peer != null)
                    sendDestroy(peer);
                dropPeer(peerHash, false, "wrong network");
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Dropping the peer " + peerHash + " because they are in the wrong net: " + entry);
                return;
            } else {
                if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Received an RI from the same net");
                } else {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Received a leaseSet: " + dsm);
                }
            }
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Received another message: " + inMsg.getClass().getName());
        }
        PeerState peer = getPeerState(remoteIdentHash);
        super.messageReceived(inMsg, remoteIdent, remoteIdentHash, msToReceive, bytesReceived);
        if (peer != null)
            peer.expireInboundMessages();
    }
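
The key step above is recognizing a RouterInfo from a different network before it ever reaches the netDb. Below is a compact sketch of just that test, not i2p source; the class and method names are hypothetical and the import paths are best-effort assumptions, while the getEntry()/getType()/getNetworkId() calls are the same ones used in messageReceived().

    import net.i2p.data.DatabaseEntry;
    import net.i2p.data.i2np.DatabaseStoreMessage;
    import net.i2p.data.router.RouterInfo;

    /** Sketch only; not from the i2p source tree. */
    class WrongNetworkCheckSketch {
        /** True if the DSM carries a RouterInfo whose network id differs from ours. */
        static boolean isWrongNetwork(DatabaseStoreMessage dsm, int ourNetworkId) {
            DatabaseEntry entry = dsm.getEntry();
            return entry != null
                   && entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO
                   && ((RouterInfo) entry).getNetworkId() != ourNetworkId;
        }
    }
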
Use of net.i2p.data.i2np.DatabaseStoreMessage in project i2p.i2p by i2p.
The class FloodOnlyLookupMatchJob, method setMessage.
    public void setMessage(I2NPMessage message) {
        if (message instanceof DatabaseSearchReplyMessage) {
            // DSRM processing is now in FloodOnlyLookupSelector instead of here;
            // a DSRM is only passed in when there are no more lookups remaining,
            // so that all DSRMs are processed, not just the last one.
            _search.failed();
            return;
        }
        try {
            DatabaseStoreMessage dsm = (DatabaseStoreMessage) message;
            if (_log.shouldLog(Log.INFO))
                _log.info(_search.getJobId() + ": got a DSM for " + dsm.getKey().toBase64());
            // Should we just pass the DataStructure directly back to somebody?
            if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
                // Since HFDSMJ wants to setReceivedAsPublished(), we have to set a flag
                // saying this was really the result of a query, so it doesn't do that.
                LeaseSet ls = (LeaseSet) dsm.getEntry();
                ls.setReceivedAsReply();
                getContext().netDb().store(dsm.getKey(), ls);
            } else {
                getContext().netDb().store(dsm.getKey(), (RouterInfo) dsm.getEntry());
            }
        } catch (UnsupportedCryptoException uce) {
            _search.failed();
            return;
        } catch (IllegalArgumentException iae) {
            if (_log.shouldLog(Log.WARN))
                _log.warn(_search.getJobId() + ": Received an invalid store reply", iae);
        }
    }
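
The subtle part of setMessage() is flagging a returned LeaseSet with setReceivedAsReply() before handing it to the netDb, so the store is treated as a query result rather than an unsolicited publish. Below is a minimal sketch of that step in isolation, not i2p source; the class and method names are hypothetical, and the calls are the ones shown above.

    import net.i2p.data.DatabaseEntry;
    import net.i2p.data.LeaseSet;
    import net.i2p.data.i2np.DatabaseStoreMessage;

    /** Sketch only; not from the i2p source tree. */
    class StoreReplySketch {
        /** Flag a LeaseSet reply before storing it, so it is not treated as received-as-published. */
        static DatabaseEntry markIfLeaseSet(DatabaseStoreMessage dsm) {
            DatabaseEntry entry = dsm.getEntry();
            if (entry != null && entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
                ((LeaseSet) entry).setReceivedAsReply();
            return entry;
        }
    }
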
Use of net.i2p.data.i2np.DatabaseStoreMessage in project i2p.i2p by i2p.
The class PeerTestJob, method testPeer.
    /**
     * Fire off the necessary jobs and messages to test the given peer.
     * The message is a store of the peer's RI to itself, with a reply token.
     */
    private void testPeer(RouterInfo peer) {
        TunnelInfo inTunnel = getInboundTunnelId();
        if (inTunnel == null) {
            _log.warn("No tunnels to get peer test replies through!");
            return;
        }
        TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);
        RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
        if (inGateway == null) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("We can't find the gateway to our inbound tunnel?! Impossible?");
            return;
        }
        int timeoutMs = getTestTimeout();
        long expiration = getContext().clock().now() + timeoutMs;
        long nonce = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE - 1);
        DatabaseStoreMessage msg = buildMessage(peer, inTunnelId, inGateway.getIdentity().getHash(), nonce, expiration);
        TunnelInfo outTunnel = getOutboundTunnelId();
        if (outTunnel == null) {
            _log.warn("No tunnels to send search out through! Something is wrong...");
            return;
        }
        TunnelId outTunnelId = outTunnel.getSendTunnelId(0);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Sending peer test to " + peer.getIdentity().getHash().toBase64() +
                       " out " + outTunnel + " w/ replies through " + inTunnel);
        ReplySelector sel = new ReplySelector(peer.getIdentity().getHash(), nonce, expiration);
        PeerReplyFoundJob reply = new PeerReplyFoundJob(getContext(), peer, inTunnel, outTunnel);
        PeerReplyTimeoutJob timeoutJob = new PeerReplyTimeoutJob(getContext(), peer, inTunnel, outTunnel, sel);
        getContext().messageRegistry().registerPending(sel, reply, timeoutJob);
        getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, null, peer.getIdentity().getHash());
    }
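
buildMessage() itself is not shown above. Per the javadoc, the test message is a DatabaseStoreMessage storing the peer's own RouterInfo back to it, with a reply token and a reply tunnel so the acknowledgement comes back through our inbound tunnel. Below is a plausible sketch of such a builder, assuming the standard DatabaseStoreMessage setters (setEntry, setReplyToken, setReplyTunnel, setReplyGateway, setMessageExpiration) and best-effort import paths; it is not necessarily the actual i2p implementation.

    import net.i2p.data.Hash;
    import net.i2p.data.TunnelId;
    import net.i2p.data.i2np.DatabaseStoreMessage;
    import net.i2p.data.router.RouterInfo;
    import net.i2p.router.RouterContext;

    /** Sketch only; not necessarily the real PeerTestJob.buildMessage(). */
    class PeerTestMessageSketch {
        /** Store the peer's own RI back to it, asking for a reply carrying the nonce
            to be delivered through our inbound tunnel. */
        static DatabaseStoreMessage buildTestMessage(RouterContext ctx, RouterInfo peer,
                                                     TunnelId replyTunnel, Hash replyGateway,
                                                     long nonce, long expiration) {
            DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
            msg.setEntry(peer);                  // the RI being "stored"
            msg.setReplyToken(nonce);            // ask for an ack carrying this token
            msg.setReplyTunnel(replyTunnel);     // ...delivered to our inbound tunnel
            msg.setReplyGateway(replyGateway);   // ...via its gateway
            msg.setMessageExpiration(expiration);
            return msg;
        }
    }
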
Use of net.i2p.data.i2np.DatabaseStoreMessage in project i2p.i2p by i2p.
The class SearchUpdateReplyFoundJob, method runJob.
    public void runJob() {
        if (_isFloodfillPeer)
            _job.decrementOutstandingFloodfillSearches();
        I2NPMessage message = _message;
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Reply from " + _peer.toBase64() + " with message " + message.getClass().getSimpleName());
        long howLong = System.currentTimeMillis() - _sentOn;
        // assume requests are 1KB (they're almost always much smaller, but tunnels have a fixed size)
        int msgSize = 1024;
        if (_replyTunnel != null) {
            for (int i = 0; i < _replyTunnel.getLength(); i++)
                getContext().profileManager().tunnelDataPushed(_replyTunnel.getPeer(i), howLong, msgSize);
            _replyTunnel.incrementVerifiedBytesTransferred(msgSize);
        }
        if (_outTunnel != null) {
            for (int i = 0; i < _outTunnel.getLength(); i++)
                getContext().profileManager().tunnelDataPushed(_outTunnel.getPeer(i), howLong, msgSize);
            _outTunnel.incrementVerifiedBytesTransferred(msgSize);
        }
        if (message instanceof DatabaseStoreMessage) {
            long timeToReply = _state.dataFound(_peer);
            DatabaseStoreMessage msg = (DatabaseStoreMessage) message;
            DatabaseEntry entry = msg.getEntry();
            try {
                _facade.store(msg.getKey(), entry);
                getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
            } catch (UnsupportedCryptoException uce) {
                // don't blame the peer
                getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
                _state.abort();
                // searchNext() will call fail()
            } catch (IllegalArgumentException iae) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Peer " + _peer + " sent us invalid data: ", iae);
                // blame the peer
                getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply);
            }
        } else if (message instanceof DatabaseSearchReplyMessage) {
            _job.replyFound((DatabaseSearchReplyMessage) message, _peer);
        } else {
            if (_log.shouldLog(Log.ERROR))
                _log.error(getJobId() + ": What?! Reply job matched a strange message: " + message);
            return;
        }
        _job.searchNext();
    }
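
One detail worth noting in runJob(): before the reply is processed, every hop of both the outbound tunnel and the reply tunnel is credited in the profile manager with having moved an assumed 1 KB message. Below is a small sketch of that crediting step factored out, not i2p source; the class and method names are hypothetical, while the per-hop calls are the ones used above.

    import net.i2p.router.RouterContext;
    import net.i2p.router.TunnelInfo;

    /** Sketch only; not from the i2p source tree. */
    class TunnelCreditSketch {
        /** Credit each hop of the tunnel with pushing msgSize bytes in howLong ms,
            then bump the tunnel's own verified-bytes counter. */
        static void creditTunnelHops(RouterContext ctx, TunnelInfo tunnel, long howLong, int msgSize) {
            if (tunnel == null)
                return;
            for (int i = 0; i < tunnel.getLength(); i++)
                ctx.profileManager().tunnelDataPushed(tunnel.getPeer(i), howLong, msgSize);
            tunnel.incrementVerifiedBytesTransferred(msgSize);
        }
    }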