forked from I2P_Developers/i2p.i2p
Compare commits
5 Commits
i2p.i2p.2. ... i2p.i2p.2.

Author | SHA1 | Date
---|---|---
| 28d045cba5 |
| c5f8679177 |
| 65e2b5b271 |
| 80a2aeafb0 |
| c6a97a1843 |
@@ -375,6 +375,7 @@ public class RouterContext extends I2PAppContext {
    public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; }
    public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); }
    public FloodfillNetworkDatabaseFacade mainNetDb() { return _netDb.mainNetDB(); }
    public FloodfillNetworkDatabaseFacade multihomeNetDb() { return _netDb.multiHomeNetDB(); }
    public FloodfillNetworkDatabaseFacade exploratoryNetDb() { return _netDb.exploratoryNetDB(); }
    public FloodfillNetworkDatabaseFacade clientNetDb(String id) { return _netDb.clientNetDB(id); }
    /**
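The RouterContext hunk above adds a multihomeNetDb() accessor next to the existing main, exploratory, and per-client ones, all served by the SegmentedNetworkDatabaseFacade. The standalone sketch below only illustrates that segmentation-by-id idea; the class and method names (SubDb, Segmentor) are invented for illustration and are not the I2P API.

// Minimal standalone sketch of the accessor/segmentation idea; not the real I2P classes.
import java.util.HashMap;
import java.util.Map;

public class SegmentorSketch {
    /** One logical network database segment. */
    static class SubDb {
        final String id;
        SubDb(String id) { this.id = id; }
    }

    /** Hands out sub-databases by well-known id, mirroring mainNetDb()/multihomeNetDb()/clientNetDb(id). */
    static class Segmentor {
        private final Map<String, SubDb> subDbs = new HashMap<>();

        SubDb get(String id) {
            return subDbs.computeIfAbsent(id, SubDb::new);
        }

        SubDb main()              { return get("main"); }
        SubDb multihome()         { return get("multihome"); }
        SubDb client(String dest) { return get("clients_" + dest); }
    }

    public static void main(String[] args) {
        Segmentor segmentor = new Segmentor();
        // A router context would expose these the same way the new multihomeNetDb() accessor does.
        System.out.println(segmentor.main().id);                 // main
        System.out.println(segmentor.multihome().id);            // multihome
        System.out.println(segmentor.client("example.i2p").id);  // clients_example.i2p
    }
}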
@@ -448,6 +448,7 @@ class ClientConnectionRunner {
        if (id == null)
            return;
        boolean isPrimary = false;
        String dbid = this.getDestHash().toBase32();
        for (Iterator<SessionParams> iter = _sessions.values().iterator(); iter.hasNext(); ) {
            SessionParams sp = iter.next();
            if (id.equals(sp.sessionId)) {
@@ -44,6 +44,7 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade {
    public void restart() {}
    public void shutdown() {}
    public void remove(String dbid){}
    public void startup() {
        RouterInfo info = _context.router().getRouterInfo();
        _routers.put(info.getIdentity().getHash(), info);
@@ -95,15 +95,6 @@ public class OutboundCache {
     */
    final Map<HashPair, Long> lastReplyRequestCache = new ConcurrentHashMap<HashPair, Long>(64);

    /**
     * This cache is used to keep track of when we recieve a leaseSet from a router
     * we are multihomed with, or otherwise are asked to store a valid routerInfo for
     * a destination which we also host.
     */
    public final ConcurrentHashMap<Hash, LeaseSet> multihomedCache = new ConcurrentHashMap<Hash, LeaseSet>(64);

    private final RouterContext _context;

    private static final int CLEAN_INTERVAL = 5*60*1000;
@@ -147,7 +147,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
    // Only send it out if it is in our estimated keyspace.
    // For this, we do NOT use their dontInclude list as it can't be trusted
    // (i.e. it could mess up the closeness calculation)
    LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
    LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
    Set<Hash> closestHashes = getContext().mainNetDb().findNearestRouters(searchKey,
                                                                          CLOSENESS_THRESHOLD, null);
    if (weAreClosest(closestHashes)) {
@@ -166,20 +166,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
    // If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
    // always recievedAsPublished. No need to decide whether or not to answer the request like above, just
    // answer it so it doesn't look different from other stores.
    if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4) && possibleMultihomed.getReceivedAsPublished()) {
    if (possibleMultihomed.getReceivedAsPublished()) {
        if (_log.shouldLog(Log.INFO))
            _log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
        getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
        sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
    } else {
        // if it expired, remove it from the cache.
        getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
        // Lie, pretend we don't have it
        if (_log.shouldLog(Log.INFO))
            _log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
        getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
        Set<Hash> routerHashSet = getNearestRouters(lookupType);
        sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
    }
} else {
    // Lie, pretend we don't have it
@@ -190,27 +181,13 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
            sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
        }
    } else {
        LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
        LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey); //clientMessagePool().getCache().multihomedCache.get(searchKey);
        if (possibleMultihomed != null) {
            if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4) && possibleMultihomed.getReceivedAsPublished()) {
            if (possibleMultihomed.getReceivedAsPublished()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
                    _log.info("We have local LS " + searchKey + " in our multihomes cache meaning it was stored to us. Answering query with the stored LS.");
                getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
                sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
            } else {
                // if it expired, remove it from the cache.
                getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
                // It was not published to us (we looked it up, for example)
                // or it's local and we aren't floodfill,
                // or it's local and we don't publish it.
                // Lie, pretend we don't have it
                if (_log.shouldLog(Log.INFO))
                    _log.info("We have LS " + searchKey +
                              ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
                              " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
                getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
                Set<Hash> routerHashSet = getNearestRouters(lookupType);
                sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
            }
        } else {
            // It was not published to us (we looked it up, for example)
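The two HandleDatabaseLookupMessageJob hunks above switch from the old multihomedCache to a local lookup in the multihome sub-database and only answer the query when the leaseSet was received as published (i.e. actually stored to us); otherwise the router "lies" and returns only the closest router hashes. The sketch below is a simplified standalone restatement of that branch; LeaseSetView and LookupDecision are invented names, not I2P classes.

// Illustrative decision sketch only; names here are not from the I2P source.
public class MultihomeLookupSketch {

    /** Simplified view of the field the job checks on a stored leaseSet. */
    static class LeaseSetView {
        final boolean receivedAsPublished; // true when the LS was stored to us (e.g. by a multihomed peer)
        LeaseSetView(boolean receivedAsPublished) { this.receivedAsPublished = receivedAsPublished; }
    }

    enum LookupDecision { SEND_DATA, SEND_CLOSEST }

    /**
     * Mirrors the branch in the patch: if the multihome sub-database returned a leaseSet that
     * was received as published, answer the query as if it were in our keyspace; otherwise
     * pretend not to have it and only return the closest router hashes.
     */
    static LookupDecision decide(LeaseSetView possibleMultihomed) {
        if (possibleMultihomed != null && possibleMultihomed.receivedAsPublished)
            return LookupDecision.SEND_DATA;
        return LookupDecision.SEND_CLOSEST;
    }

    public static void main(String[] args) {
        System.out.println(decide(new LeaseSetView(true)));  // SEND_DATA
        System.out.println(decide(new LeaseSetView(false))); // SEND_CLOSEST
        System.out.println(decide(null));                    // SEND_CLOSEST
    }
}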
@@ -507,7 +507,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
    }

    @Override
    public boolean floodfillEnabled() { return _floodfillEnabled; }
    public boolean floodfillEnabled() {
        if (isMultihomeDb())
            return _context.mainNetDb().floodfillEnabled();
        return _floodfillEnabled;
    }

    /**
     * @param peer may be null, returns false if null
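Here floodfillEnabled() on the multihome sub-database is changed to defer to the main database's flag instead of keeping its own. A minimal sketch of that delegation pattern, with an illustrative class name (Db) rather than the real facade classes:

// Standalone sketch of the delegation added in floodfillEnabled(); names are illustrative.
public class FloodfillFlagSketch {

    static class Db {
        private final boolean multihome;
        private final Db mainDb;          // null for the main database itself
        private boolean floodfillFlag;

        Db(boolean multihome, Db mainDb) {
            this.multihome = multihome;
            this.mainDb = mainDb;
        }

        /** The multihome segment never keeps its own flag; it reports whatever the main DB says. */
        boolean floodfillEnabled() {
            if (multihome)
                return mainDb.floodfillEnabled();
            return floodfillFlag;
        }
    }

    public static void main(String[] args) {
        Db main = new Db(false, null);
        main.floodfillFlag = true;
        Db multihome = new Db(true, main);
        System.out.println(multihome.floodfillEnabled()); // true, mirrors the main DB
    }
}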
@@ -27,7 +27,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
    private RouterContext _context;
    private Map<String, FloodfillNetworkDatabaseFacade> _subDBs = new HashMap<String, FloodfillNetworkDatabaseFacade>();
    public static final String MAIN_DBID = "main";
    private static final String MULTIHOME_DBID = "multihome";
    public static final String MULTIHOME_DBID = "multihome";

    public FloodfillNetworkDatabaseSegmentor(RouterContext context) {
        super(context);
@@ -956,4 +956,18 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
        }
        return null;
    }

    public void remove(String dbid) {
        if (dbid != null) {
            if (dbid.endsWith(".i2p") && !dbid.startsWith("clients_"))
                dbid = "clients_" + dbid;
            else if (dbid.equals(""))
                dbid = MAIN_DBID;
            GetSubNetDB(dbid).shutdown();
            _subDBs.remove(dbid);
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("remove called with null dbid, refusing to remove main DB");
        }
    }
}
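The new remove(String dbid) normalizes the id before shutting a sub-database down: a bare "*.i2p" name gets the "clients_" prefix, an empty string maps to the main DBID, and null is refused so the main DB is never removed. A small standalone sketch of just that normalization, assuming the same prefix and MAIN_DBID constants:

// Sketch of the dbid normalization in remove(); constants mirror MAIN_DBID / the "clients_" prefix.
public class DbidNormalizeSketch {

    static final String MAIN_DBID = "main";

    /** Same normalization the patch applies before shutting down and dropping a sub-database. */
    static String normalize(String dbid) {
        if (dbid == null)
            return null; // the patch refuses to remove the main DB for a null id
        if (dbid.endsWith(".i2p") && !dbid.startsWith("clients_"))
            return "clients_" + dbid;
        if (dbid.equals(""))
            return MAIN_DBID;
        return dbid;
    }

    public static void main(String[] args) {
        System.out.println(normalize("example.i2p"));          // clients_example.i2p
        System.out.println(normalize("clients_example.i2p"));  // clients_example.i2p
        System.out.println(normalize(""));                     // main
        System.out.println(normalize(null));                   // null
    }
}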
@@ -155,16 +155,9 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
        getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
        // throw rather than return, so that we send the ack below (prevent easy attack)
        dontBlamePeer = true;
        // store the peer in the outboundCache instead so that we can reply back with it without confusing ourselves.
        if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
            if (_facade.validate(key, ls) == null) {
                LeaseSet compareLeasesetDate = getContext().clientMessagePool().getCache().multihomedCache.get(key);
                if (compareLeasesetDate == null)
                    getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
                else if (compareLeasesetDate.getEarliestLeaseDate() < ls.getEarliestLeaseDate())
                    getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
            }
        }
        // get the router context and ask to store it in the multihome DBID
        RouterContext ctx = getContext();
        ctx.multihomeNetDb().store(key, ls);
        throw new IllegalArgumentException("(dbid: " + _facade._dbid
                                           + ") Peer attempted to store local leaseSet: "
                                           + key.toBase32());
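The store-message hunk drops the hand-rolled multihomedCache bookkeeping, which kept whichever leaseSet had the later earliest-lease expiry, and instead hands the leaseSet straight to the multihome netDb. The sketch below models only the freshness rule the removed cache code implemented; the map and class names are illustrative, not the I2P types.

// Sketch of the freshness check the removed cache code performed (keep the leaseSet whose
// earliest lease expires later); the multihome sub-database now takes over this bookkeeping.
import java.util.concurrent.ConcurrentHashMap;

public class MultihomeStoreSketch {

    static class LeaseSet {
        final long earliestLeaseDate; // ms since epoch
        LeaseSet(long earliestLeaseDate) { this.earliestLeaseDate = earliestLeaseDate; }
    }

    static final ConcurrentHashMap<String, LeaseSet> cache = new ConcurrentHashMap<>();

    static void store(String key, LeaseSet ls) {
        cache.merge(key, ls,
                (existing, incoming) -> incoming.earliestLeaseDate > existing.earliestLeaseDate ? incoming : existing);
    }

    public static void main(String[] args) {
        store("dest", new LeaseSet(1_000));
        store("dest", new LeaseSet(2_000)); // newer, replaces the entry
        store("dest", new LeaseSet(1_500)); // older, ignored
        System.out.println(cache.get("dest").earliestLeaseDate); // 2000
    }
}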
@@ -304,6 +304,10 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
        return _dbid.startsWith("clients_");
    }

    protected boolean isMultihomeDb() {
        return _dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID);
    }

    public synchronized void startup() {
        _log.info("Starting up the kademlia network database");
        RouterInfo ri = _context.router().getRouterInfo();
@@ -483,6 +483,10 @@ public class PersistentDataStore extends TransientDataStore {
            _lastReseed = _context.clock().now();
            _setNetDbReady = true;
            setNetDbReady();
        } else if (_facade.isMultihomeDb()) {
            _lastReseed = _context.clock().now();
            _setNetDbReady = true;
            setNetDbReady();
        } else if (_facade.reseedChecker().checkReseed(routerCount)) {
            _lastReseed = _context.clock().now();
            // checkReseed will call wakeup() when done and we will run again
@@ -494,6 +498,8 @@ public class PersistentDataStore extends TransientDataStore {
        int count = Math.min(routerCount, size());
        if (_facade.isClientDb()) {
            _lastReseed = _context.clock().now();
        } else if (_facade.isMultihomeDb()) {
            _lastReseed = _context.clock().now();
        } else if (count < MIN_ROUTERS) {
            if (_facade.reseedChecker().checkReseed(count))
                _lastReseed = _context.clock().now();
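Both PersistentDataStore hunks make the client and multihome segments mark themselves as reseeded immediately, so only the main database still compares its router count against the minimum and can trigger a network reseed. A condensed sketch of that gate; the method and parameter names are mine, not the PersistentDataStore API:

// Sketch of the reseed gate the patch adds: client and multihome segments never trigger a reseed.
public class ReseedGateSketch {

    static boolean shouldCheckReseed(boolean clientDb, boolean multihomeDb, int routerCount, int minRouters) {
        if (clientDb || multihomeDb)
            return false;           // these segments only mirror data, so a small router count is fine
        return routerCount < minRouters;
    }

    public static void main(String[] args) {
        System.out.println(shouldCheckReseed(false, true, 0, 50));   // false: multihome DB
        System.out.println(shouldCheckReseed(false, false, 10, 50)); // true: main DB below the minimum
    }
}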
@@ -332,4 +332,6 @@ public abstract class SegmentedNetworkDatabaseFacade { // extends FloodfillNetwo
    }

    public abstract String getDbidByHash(Hash clientKey);

    public abstract void remove(String dbid);
}
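SegmentedNetworkDatabaseFacade now declares remove(String dbid) abstract, which is why DummyNetworkDatabaseFacade above gains a no-op override. The sketch below shows the minimal shape a subclass has to provide, with String standing in for the Hash parameter and all names illustrative rather than the real hierarchy.

// Minimal sketch of what the new abstract remove(String dbid) obliges subclasses to provide.
public class SegmentedFacadeSketch {

    static abstract class SegmentedFacade {
        abstract String getDbidByHash(String clientKey); // stands in for the Hash-based signature
        abstract void remove(String dbid);
    }

    /** Test/dummy implementation: nothing to look up, nothing to tear down. */
    static class DummyFacade extends SegmentedFacade {
        @Override String getDbidByHash(String clientKey) { return null; }
        @Override void remove(String dbid) { /* no sub-databases to shut down */ }
    }

    public static void main(String[] args) {
        SegmentedFacade facade = new DummyFacade();
        facade.remove("clients_example.i2p"); // safe no-op
        System.out.println(facade.getDbidByHash("example"));
    }
}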