Use of net.i2p.data.TunnelId in project i2p.i2p by i2p.
Class StoreJob, method sendStoreThroughClient.
/**
 * Send a leaseset store message out the client tunnel,
 * with the reply to come back through a client tunnel.
 * Stores are garlic encrypted to hide the identity from the OBEP.
 *
 * This makes it harder for an exploratory OBEP or IBGW to correlate it
 * with one or more destinations. Since we are publishing the leaseset,
 * it's easy to find out that an IB tunnel belongs to this dest, and
 * it isn't much harder to do the same for an OB tunnel.
 *
 * As a side benefit, client tunnels should be faster and more reliable than
 * exploratory tunnels.
 *
 * @param msg must contain a leaseset
 * @since 0.7.10
 */
private void sendStoreThroughClient(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
    long token = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
    Hash client = msg.getKey();
    Hash to = peer.getIdentity().getHash();
    TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel(client, to);
    if (replyTunnel == null) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("No reply inbound tunnels available!");
        fail();
        return;
    }
    TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0);
    msg.setReplyToken(token);
    msg.setReplyTunnel(replyTunnelId);
    msg.setReplyGateway(replyTunnel.getPeer(0));
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": send(dbStore) w/ token expected " + token);
    TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel(client, to);
    if (outTunnel != null) {
        I2NPMessage sent;
        boolean shouldEncrypt = supportsEncryption(peer);
        if (shouldEncrypt) {
            // garlic encrypt
            MessageWrapper.WrappedMessage wm = MessageWrapper.wrap(getContext(), msg, client, peer);
            if (wm == null) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Fail garlic encrypting from: " + client);
                fail();
                return;
            }
            sent = wm.getMessage();
            _state.addPending(to, wm);
        } else {
            _state.addPending(to);
            // now that almost all floodfills are at 0.7.10,
            // just refuse to store unencrypted to older ones.
            _state.replyTimeout(to);
            getContext().jobQueue().addJob(new WaitJob(getContext()));
            return;
        }
        SendSuccessJob onReply = new SendSuccessJob(getContext(), peer, outTunnel, sent.getMessageSize());
        FailedJob onFail = new FailedJob(getContext(), peer, getContext().clock().now());
        StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
        if (_log.shouldLog(Log.DEBUG)) {
            if (shouldEncrypt)
                _log.debug("sending encrypted store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + sent);
            else
                _log.debug("sending store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + sent);
            // _log.debug("Expiration is " + new Date(sent.getMessageExpiration()));
        }
        getContext().messageRegistry().registerPending(selector, onReply, onFail);
        getContext().tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), null, to);
    } else {
        if (_log.shouldLog(Log.WARN))
            _log.warn("No outbound tunnels to send a dbStore out - delaying...");
        // continueSending() above did an addPending() so remove it here.
        // This means we will skip the peer next time, can't be helped for now
        // without modding StoreState
        _state.replyTimeout(to);
        Job waiter = new WaitJob(getContext());
        waiter.getTiming().setStartAfter(getContext().clock().now() + 3 * 1000);
        getContext().jobQueue().addJob(waiter);
        // fail();
    }
}
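A minimal sketch of the reply-path wiring done above, separated out for clarity: pick a client inbound tunnel that avoids the target floodfill, then point the DatabaseStoreMessage's reply token, reply tunnel, and reply gateway at that tunnel's entry hop. The helper class and method names are hypothetical; only calls that already appear in sendStoreThroughClient() are used.

import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;

// Hypothetical helper, not part of StoreJob.
class ReplyPathSketch {

    /** @return the reply token set on the message, or -1 if no client inbound tunnel was available */
    static long wireReplyPath(RouterContext ctx, DatabaseStoreMessage msg, Hash client, Hash floodfill) {
        // choose an inbound tunnel belonging to the client, avoiding the floodfill we are storing to
        TunnelInfo replyTunnel = ctx.tunnelManager().selectInboundTunnel(client, floodfill);
        if (replyTunnel == null)
            return -1; // caller should fail(), as sendStoreThroughClient() does
        long token = 1 + ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE);
        TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0); // tunnel ID at the inbound gateway (hop 0)
        msg.setReplyToken(token);                                   // nonzero token requests a confirmation reply
        msg.setReplyTunnel(replyTunnelId);
        msg.setReplyGateway(replyTunnel.getPeer(0));                // router hash of the inbound gateway
        return token;                                               // later matched by the StoreMessageSelector
    }
}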
Use of net.i2p.data.TunnelId in project i2p.i2p by i2p.
Class TunnelPool, method locked_buildNewLeaseSet.
/**
 * Build a leaseSet with the required tunnels that aren't about to expire.
 * Caller must synchronize on _tunnels.
 *
 * @return null on failure
 */
protected LeaseSet locked_buildNewLeaseSet() {
    if (!_alive)
        return null;
    int wanted = Math.min(_settings.getQuantity(), LeaseSet.MAX_LEASES);
    if (_tunnels.size() < wanted) {
        if (_log.shouldLog(Log.WARN))
            _log.warn(toString() + ": Not enough tunnels (" + _tunnels.size() + ", wanted " + wanted + ")");
        // see comment below
        if (_tunnels.isEmpty())
            return null;
    }
    long expireAfter = _context.clock().now(); // + _settings.getRebuildPeriod();
    TunnelInfo zeroHopTunnel = null;
    Lease zeroHopLease = null;
    TreeSet<Lease> leases = new TreeSet<Lease>(new LeaseComparator());
    for (int i = 0; i < _tunnels.size(); i++) {
        TunnelInfo tunnel = _tunnels.get(i);
        if (tunnel.getExpiration() <= expireAfter)
            continue; // expires too soon, skip it
        if (tunnel.getLength() <= 1) {
            // Keep only the one that expires the latest.
            if (zeroHopTunnel != null) {
                if (zeroHopTunnel.getExpiration() > tunnel.getExpiration())
                    continue;
                if (zeroHopLease != null)
                    leases.remove(zeroHopLease);
            }
            zeroHopTunnel = tunnel;
        }
        TunnelId inId = tunnel.getReceiveTunnelId(0);
        Hash gw = tunnel.getPeer(0);
        if ((inId == null) || (gw == null)) {
            _log.error(toString() + ": broken? tunnel has no inbound gateway/tunnelId? " + tunnel);
            continue;
        }
        Lease lease = new Lease();
        // bugfix
        // ExpireJob reduces the expiration, which causes a 2nd leaseset with the same lease
        // to have an earlier expiration, so it isn't stored.
        // Get the "real" expiration from the gateway hop config,
        // HopConfig expirations are the same as the "real" expiration and don't change
        // see configureNewTunnel()
        lease.setEndDate(new Date(((TunnelCreatorConfig) tunnel).getConfig(0).getExpiration()));
        lease.setTunnelId(inId);
        lease.setGateway(gw);
        leases.add(lease);
        // remember in case we want to remove it for a later-expiring zero-hopper
        if (tunnel.getLength() <= 1)
            zeroHopLease = lease;
    }
    // Do we want a config option for this, or are there times when we shouldn't do this?
    if (leases.size() < wanted) {
        if (_log.shouldLog(Log.WARN))
            _log.warn(toString() + ": Not enough leases (" + leases.size() + ", wanted " + wanted + ")");
        if (leases.isEmpty())
            return null;
    }
    LeaseSet ls = new LeaseSet();
    Iterator<Lease> iter = leases.iterator();
    int count = Math.min(leases.size(), wanted);
    for (int i = 0; i < count; i++)
        ls.addLease(iter.next());
    if (_log.shouldLog(Log.INFO))
        _log.info(toString() + ": built new leaseSet: " + ls);
    return ls;
}
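As a sketch of the per-tunnel work inside the loop above: build one Lease from a tunnel's inbound gateway hop, taking the gateway hop config's unchanging expiration rather than the (possibly reduced) TunnelInfo expiration. The class and method names are hypothetical; the calls and the cast to TunnelCreatorConfig mirror locked_buildNewLeaseSet(), so the cast is only valid for locally built tunnels as in the pool.

import java.util.Date;
import net.i2p.data.Hash;
import net.i2p.data.Lease;
import net.i2p.data.TunnelId;
import net.i2p.router.TunnelInfo;
import net.i2p.router.tunnel.TunnelCreatorConfig;

// Hypothetical helper, not part of TunnelPool.
class LeaseSketch {

    /** @return a lease pointing at the tunnel's inbound gateway, or null if gateway info is missing */
    static Lease leaseFor(TunnelInfo tunnel) {
        TunnelId inId = tunnel.getReceiveTunnelId(0); // tunnel ID at the inbound gateway (hop 0)
        Hash gw = tunnel.getPeer(0);                  // router hash of that gateway
        if (inId == null || gw == null)
            return null;                              // broken tunnel, skip it as the loop above does
        Lease lease = new Lease();
        // use the "real" expiration from the gateway hop config (see the bugfix comment above)
        lease.setEndDate(new Date(((TunnelCreatorConfig) tunnel).getConfig(0).getExpiration()));
        lease.setTunnelId(inId);
        lease.setGateway(gw);
        return lease;
    }
}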
Use of net.i2p.data.TunnelId in project i2p.i2p by i2p.
Class FragmentHandler, method receiveInitialFragment.
/**
 * Handle the initial fragment in a message (or a full message, if it fits).
 *
 * @return offset after reading the full fragment
 */
private int receiveInitialFragment(byte[] preprocessed, int offset, int length) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("initial begins at " + offset + " for " + length);
    int type = (preprocessed[offset] & MASK_TYPE) >>> 5;
    boolean fragmented = (0 != (preprocessed[offset] & MASK_FRAGMENTED));
    boolean extended = (0 != (preprocessed[offset] & MASK_EXTENDED));
    offset++;
    TunnelId tunnelId = null;
    Hash router = null;
    long messageId = -1;
    if (type == TYPE_TUNNEL) {
        if (offset + 4 >= preprocessed.length)
            return -1;
        long id = DataHelper.fromLong(preprocessed, offset, 4);
        tunnelId = new TunnelId(id);
        offset += 4;
    }
    if ((type == TYPE_ROUTER) || (type == TYPE_TUNNEL)) {
        if (offset + Hash.HASH_LENGTH >= preprocessed.length)
            return -1;
        // byte h[] = new byte[Hash.HASH_LENGTH];
        // System.arraycopy(preprocessed, offset, h, 0, Hash.HASH_LENGTH);
        // router = new Hash(h);
        router = Hash.create(preprocessed, offset);
        offset += Hash.HASH_LENGTH;
    }
    if (fragmented) {
        if (offset + 4 >= preprocessed.length)
            return -1;
        messageId = DataHelper.fromLong(preprocessed, offset, 4);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("reading messageId " + messageId + " at offset " + offset + " type = " + type + " router = " + (router != null ? router.toBase64().substring(0, 4) : "n/a") + " tunnelId = " + tunnelId);
        offset += 4;
    }
    if (extended) {
        int extendedSize = preprocessed[offset] & 0xff;
        offset++;
        // we don't interpret these yet, but skip them for now
        offset += extendedSize;
    }
    if (offset + 2 >= preprocessed.length)
        return -1;
    int size = (int) DataHelper.fromLong(preprocessed, offset, 2);
    offset += 2;
    if (type == TYPE_UNDEF) {
        // Unsupported delivery type: drop it here rather than pass it along
        // to OutboundMessageDistributor.distribute() which will NPE
        if (_log.shouldLog(Log.WARN))
            _log.warn("Dropping msg at tunnel endpoint with unsupported delivery instruction type " + type + " rcvr: " + _receiver);
    } else if (fragmented) {
        FragmentedMessage msg;
        synchronized (_fragmentedMessages) {
            msg = _fragmentedMessages.get(Long.valueOf(messageId));
            if (msg == null) {
                msg = new FragmentedMessage(_context, messageId);
                _fragmentedMessages.put(Long.valueOf(messageId), msg);
            }
        }
        // synchronized is required, fragments may be arriving in different threads
        synchronized (msg) {
            boolean ok = msg.receive(preprocessed, offset, size, false, router, tunnelId);
            if (!ok)
                return -1;
            if (msg.isComplete()) {
                synchronized (_fragmentedMessages) {
                    _fragmentedMessages.remove(Long.valueOf(messageId));
                }
                if (msg.getExpireEvent() != null)
                    msg.getExpireEvent().cancel();
                receiveComplete(msg);
            } else {
                noteReception(msg.getMessageId(), 0, msg);
                if (msg.getExpireEvent() == null) {
                    RemoveFailed evt = new RemoveFailed(msg);
                    msg.setExpireEvent(evt);
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("In " + MAX_DEFRAGMENT_TIME + " dropping " + messageId);
                    evt.schedule(MAX_DEFRAGMENT_TIME);
                }
            }
        }
    } else {
        // Unfragmented
        // synchronized not required
        // always complete, never an expire event
        receiveComplete(preprocessed, offset, size, router, tunnelId);
    }
    offset += size;
    // _log.debug("Handling finished message " + msg.getMessageId() + " at offset " + offset);
    return offset;
}
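To illustrate the wire layout parsed above: after a TYPE_TUNNEL flag byte, the delivery instructions carry a 4-byte tunnel ID followed by a 32-byte router hash for the next inbound gateway. The sketch below is a hypothetical standalone parser for just that slice, using only DataHelper.fromLong(), Hash.create(), Hash.HASH_LENGTH, and the TunnelId constructor seen in receiveInitialFragment(); the holder class is invented for the example.

import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;

// Hypothetical helper, not part of FragmentHandler.
class TunnelDestSketch {

    /** Simple holder for the parsed addressing fields. */
    static class TunnelDest {
        TunnelId tunnelId;
        Hash router;
        int nextOffset;
    }

    /** @return the tunnel ID and router hash following a TYPE_TUNNEL flag byte, or null if the buffer is too short */
    static TunnelDest readTunnelDest(byte[] buf, int offset) {
        if (offset + 4 + Hash.HASH_LENGTH > buf.length)
            return null;
        TunnelDest rv = new TunnelDest();
        rv.tunnelId = new TunnelId(DataHelper.fromLong(buf, offset, 4)); // 4-byte tunnel ID
        offset += 4;
        rv.router = Hash.create(buf, offset);                            // 32-byte router hash
        rv.nextOffset = offset + Hash.HASH_LENGTH;
        return rv;
    }
}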
Use of net.i2p.data.TunnelId in project i2p.i2p by i2p.
Class TunnelDispatcher, method getNewOBGWID.
/**
 * Get a new random send tunnel ID that isn't a dup.
 * Note that we do not keep track of IDs for pending builds so this
 * does not fully prevent joinOutbound() from failing later.
 *
 * @since 0.9.5
 */
public long getNewOBGWID() {
    long rv;
    TunnelId tid;
    do {
        rv = 1 + _context.random().nextLong(TunnelId.MAX_ID_VALUE);
        tid = new TunnelId(rv);
    } while (_outboundGateways.containsKey(tid));
    return rv;
}
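getNewOBGWID() above and getNewIBEPID() below share the same rejection-sampling loop, differing only in which table is checked for duplicates. A hypothetical common form could look like the sketch below; only TunnelId.MAX_ID_VALUE, the TunnelId constructor, and the random-source call shown in these methods are assumed, and the map type is left generic.

import java.util.Map;
import net.i2p.data.TunnelId;
import net.i2p.router.RouterContext;

// Hypothetical refactoring sketch, not part of TunnelDispatcher.
class TunnelIdSketch {

    /** @return a random ID in [1, TunnelId.MAX_ID_VALUE] that is not currently a key of inUse */
    static long newUnusedId(RouterContext ctx, Map<TunnelId, ?> inUse) {
        long rv;
        do {
            rv = 1 + ctx.random().nextLong(TunnelId.MAX_ID_VALUE);
        } while (inUse.containsKey(new TunnelId(rv)));
        // as noted above, pending builds are not tracked, so a later join can still collide
        return rv;
    }
}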
Use of net.i2p.data.TunnelId in project i2p.i2p by i2p.
Class TunnelDispatcher, method getNewIBEPID.
/**
 * Get a new random receive tunnel ID that isn't a dup.
 * Not for zero hop tunnels.
 * Note that we do not keep track of IDs for pending builds so this
 * does not fully prevent joinInbound() from failing later.
 *
 * @since 0.9.5
 */
public long getNewIBEPID() {
    long rv;
    TunnelId tid;
    do {
        rv = 1 + _context.random().nextLong(TunnelId.MAX_ID_VALUE);
        tid = new TunnelId(rv);
    } while (_participants.containsKey(tid));
    return rv;
}
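A hedged usage sketch, assuming a caller that holds a RouterContext: reserve a receive-side ID for a new inbound endpoint and wrap it in a TunnelId. Only tunnelDispatcher() (used in StoreJob above) and the TunnelId constructor are relied on; how the ID is then placed into a tunnel configuration is omitted.

import net.i2p.data.TunnelId;
import net.i2p.router.RouterContext;

// Hypothetical caller sketch.
class EndpointIdSketch {

    /** @return a TunnelId wrapping a random receive ID that is not currently in use at this endpoint */
    static TunnelId reserveInboundEndpointId(RouterContext ctx) {
        long id = ctx.tunnelDispatcher().getNewIBEPID(); // 1 <= id <= TunnelId.MAX_ID_VALUE
        return new TunnelId(id);
    }
}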