
Example 1 with PublishLocalRouterInfoJob

Use of net.i2p.router.networkdb.PublishLocalRouterInfoJob in project i2p.i2p by i2p.

From the class KademliaNetworkDatabaseFacade, the startup() method, which initializes the local network database and schedules its maintenance jobs, including PublishLocalRouterInfoJob:

public synchronized void startup() {
    _log.info("Starting up the kademlia network database");
    RouterInfo ri = _context.router().getRouterInfo();
    String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
    _kb = new KBucketSet<Hash>(_context, ri.getIdentity().getHash(), BUCKET_SIZE, KAD_B, new RejectTrimmer<Hash>());
    try {
        _ds = new PersistentDataStore(_context, dbDir, this);
    } catch (IOException ioe) {
        throw new RuntimeException("Unable to initialize netdb storage", ioe);
    }
    // _ds = new TransientDataStore();
    // _exploreKeys = new HashSet(64);
    _dbDir = dbDir;
    _negativeCache = new NegativeLookupCache(_context);
    createHandlers();
    _initialized = true;
    _started = System.currentTimeMillis();
    // expire old leases
    Job elj = new ExpireLeasesJob(_context, this);
    elj.getTiming().setStartAfter(_context.clock().now() + 2 * 60 * 1000);
    _context.jobQueue().addJob(elj);
    // Don't run until after RefreshRoutersJob has run, and after validate() will return invalid for old routers.
    if (!_context.commSystem().isDummy()) {
        Job erj = new ExpireRoutersJob(_context, this);
        erj.getTiming().setStartAfter(_context.clock().now() + ROUTER_INFO_EXPIRATION_FLOODFILL + 10 * 60 * 1000);
        _context.jobQueue().addJob(erj);
    }
    if (!QUIET) {
        // _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this));
        if (_exploreJob == null)
            _exploreJob = new StartExplorersJob(_context, this);
        // fire off a group of searches from the explore pool
        // Don't start it right away, so we don't send searches for random keys
        // out our 0-hop exploratory tunnels (generating direct connections to
        // one or more floodfill peers within seconds of startup).
        // We're trying to minimize the ff connections to lessen the load on the
        // floodfills, and in any case let's try to build some real expl. tunnels first.
        // No rush, it only runs every 30m.
        _exploreJob.getTiming().setStartAfter(_context.clock().now() + EXPLORE_JOB_DELAY);
        _context.jobQueue().addJob(_exploreJob);
    } else {
        _log.warn("Operating in quiet mode - not exploring or pushing data proactively, simply reactively");
        _log.warn("This should NOT be used in production");
    }
    // periodically update and resign the router's 'published date', which basically
    // serves as a version
    Job plrij = new PublishLocalRouterInfoJob(_context);
    // do not delay this, as this creates the RI too, and we need a good local routerinfo right away
    // plrij.getTiming().setStartAfter(_context.clock().now() + PUBLISH_JOB_DELAY);
    _context.jobQueue().addJob(plrij);
    // plrij calls publish() for us
    // try {
    //     publish(ri);
    // } catch (IllegalArgumentException iae) {
    //     _context.router().rebuildRouterInfo(true);
    //     //_log.log(Log.CRIT, "Our local router info is b0rked, clearing from scratch", iae);
    //     //_context.router().rebuildNewIdentity();
    // }
}
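
A note on the scheduling pattern: each task in startup() is wrapped in a Job, optionally given a delayed first run via getTiming().setStartAfter(...), and handed to the router's JobQueue. Below is a minimal sketch of that pattern, assuming the standard net.i2p.router.JobImpl base class; the MyMaintenanceJob class, its log messages, and the delay values are hypothetical illustrations, not part of this project's code:

import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;

// Hypothetical job illustrating the scheduling pattern used in startup() above.
class MyMaintenanceJob extends JobImpl {
    private final Log _log;

    public MyMaintenanceJob(RouterContext context) {
        super(context);
        _log = context.logManager().getLog(MyMaintenanceJob.class);
    }

    public String getName() {
        return "Hypothetical maintenance job";
    }

    public void runJob() {
        _log.info("Running hypothetical maintenance");
        // Periodic jobs commonly reschedule themselves when they finish
        requeue(60 * 60 * 1000L); // run again in an hour
    }
}

// Scheduling, mirroring startup(): delay the first run, then enqueue.
// Job mj = new MyMaintenanceJob(_context);
// mj.getTiming().setStartAfter(_context.clock().now() + 60 * 1000);
// _context.jobQueue().addJob(mj);

Delaying the first run with setStartAfter(), as startup() does for ExpireLeasesJob, ExpireRoutersJob, and StartExplorersJob, keeps router startup cheap and avoids competing with tunnel building; PublishLocalRouterInfoJob is the deliberate exception, since a good local RouterInfo is needed immediately.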
Also used: Hash (net.i2p.data.Hash), RouterInfo (net.i2p.data.router.RouterInfo), IOException (java.io.IOException), RejectTrimmer (net.i2p.kademlia.RejectTrimmer), Job (net.i2p.router.Job), PublishLocalRouterInfoJob (net.i2p.router.networkdb.PublishLocalRouterInfoJob)

Aggregations

IOException (java.io.IOException): 1
Hash (net.i2p.data.Hash): 1
RouterInfo (net.i2p.data.router.RouterInfo): 1
RejectTrimmer (net.i2p.kademlia.RejectTrimmer): 1
Job (net.i2p.router.Job): 1
PublishLocalRouterInfoJob (net.i2p.router.networkdb.PublishLocalRouterInfoJob): 1