Compare commits

...

4 Commits

7 changed files with 32 additions and 43 deletions

RouterContext.java

@@ -375,6 +375,7 @@ public class RouterContext extends I2PAppContext {
public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; }
public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); }
public FloodfillNetworkDatabaseFacade mainNetDb() { return _netDb.mainNetDB(); }
public FloodfillNetworkDatabaseFacade multihomeNetDb() { return _netDb.multiHomeNetDB(); }
public FloodfillNetworkDatabaseFacade clientNetDb(String id) { return _netDb.clientNetDB(id); }
public FloodfillNetworkDatabaseFacade clientNetDb(Hash id) { return _netDb.clientNetDB(id); }
/**
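
The new multihomeNetDb() accessor above sits alongside the existing netDb()/mainNetDb() and clientNetDb() getters, so jobs can select a sub-database by role. A minimal sketch, assuming only the accessors shown above plus the standard lookupLeaseSetLocally() facade method; the helper class and method names are illustrative, not part of this change:

```java
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.RouterContext;

// Illustrative helper, not part of the diff: consult the multihome
// sub-database first, then fall back to the router's main netDb.
class MultihomeLookupSketch {
    static LeaseSet lookup(RouterContext ctx, Hash key) {
        // New accessor added in this change set
        LeaseSet ls = ctx.multihomeNetDb().lookupLeaseSetLocally(key);
        if (ls != null)
            return ls;
        // Main floodfill database, as before
        return ctx.netDb().lookupLeaseSetLocally(key);
    }
}
```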

HandleDatabaseLookupMessageJob.java

@@ -147,7 +147,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// Only send it out if it is in our estimated keyspace.
// For this, we do NOT use their dontInclude list as it can't be trusted
// (i.e. it could mess up the closeness calculation)
LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
Set<Hash> closestHashes = getContext().mainNetDb().findNearestRouters(searchKey,
CLOSENESS_THRESHOLD, null);
if (weAreClosest(closestHashes)) {
@@ -166,20 +166,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
// always recievedAsPublished. No need to decide whether or not to answer the request like above, just
// answer it so it doesn't look different from other stores.
if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4) && possibleMultihomed.getReceivedAsPublished()) {
if (possibleMultihomed.getReceivedAsPublished()) {
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
} else {
// if it expired, remove it from the cache.
getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// Lie, pretend we don't have it
@@ -190,27 +181,13 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
if (possibleMultihomed != null) {
if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4) && possibleMultihomed.getReceivedAsPublished()) {
if (possibleMultihomed.getReceivedAsPublished()) {
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
_log.info("We have local LS " + searchKey + " in our multihomes cache meaning it was stored to us. Answering query with the stored LS.");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
} else {
// if it expired, remove it from the cache.
getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// It was not published to us (we looked it up, for example)
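
Taken together, the two replaced lookups above reduce the multihome handling to a single rule: answer only when the LeaseSet held in the multihome sub-database was received as published, i.e. actually stored to us, and otherwise fall through to the "pretend we don't have it" path. A condensed, illustrative sketch with a hypothetical wrapper class; the job's sendData()/sendClosest() calls are omitted:

```java
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.RouterContext;

// Condensed, illustrative version of the answering rule above (not part of
// the diff). Returns the LeaseSet to reply with, or null to send the
// closest routers instead.
class MultihomeAnswerRuleSketch {
    static LeaseSet answerWith(RouterContext ctx, Hash searchKey) {
        LeaseSet ls = ctx.multihomeNetDb().lookupLeaseSetLocally(searchKey);
        if (ls != null && ls.getReceivedAsPublished())
            return ls;   // it was stored to us: answer as if in our keyspace
        return null;     // lie, pretend we don't have it
    }
}
```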

FloodfillNetworkDatabaseFacade.java

@@ -481,7 +481,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}
@Override
public boolean floodfillEnabled() { return _floodfillEnabled; }
public boolean floodfillEnabled() {
if (isMultihomeDb())
return _context.mainNetDb().floodfillEnabled();
return _floodfillEnabled;
}
/**
* @param peer may be null, returns false if null

FloodfillNetworkDatabaseSegmentor.java

@@ -63,8 +63,8 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
private RouterContext _context;
private Map<String, FloodfillNetworkDatabaseFacade> _subDBs = new HashMap<String, FloodfillNetworkDatabaseFacade>();
public static final String MAIN_DBID = "main";
private static final String MULTIHOME_DBID = "multihome";
private static final String EXPLORATORY_DBID = "exploratory";
public static final String MULTIHOME_DBID = "clients_multihome";
private static final String EXPLORATORY_DBID = "clients_exploratory";
private final FloodfillNetworkDatabaseFacade _mainDbid;
private final FloodfillNetworkDatabaseFacade _multihomeDbid;
private final FloodfillNetworkDatabaseFacade _exploratoryDbid;
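
The rename matters because KademliaNetworkDatabaseFacade.isClientDb(), shown further down in this compare, matches on the "clients_" prefix, so the multihome and exploratory sub-databases now count as client databases: they get no ReseedChecker and are skipped in the reseed path in PersistentDataStore. A trivial illustration of the prefix check, not part of the change:

```java
// Both renamed IDs satisfy the startsWith("clients_") test used by
// KademliaNetworkDatabaseFacade.isClientDb() later in this compare.
public class DbidPrefixSketch {
    public static void main(String[] args) {
        System.out.println("clients_multihome".startsWith("clients_"));    // true
        System.out.println("clients_exploratory".startsWith("clients_"));  // true
    }
}
```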

HandleFloodfillDatabaseStoreMessageJob.java

@@ -155,16 +155,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
// throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
// store the peer in the outboundCache instead so that we can reply back with it without confusing ourselves.
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
if (_facade.validate(key, ls) == null) {
LeaseSet compareLeasesetDate = getContext().clientMessagePool().getCache().multihomedCache.get(key);
if (compareLeasesetDate == null)
getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
else if (compareLeasesetDate.getEarliestLeaseDate() < ls.getEarliestLeaseDate())
getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
}
}
getContext().multihomeNetDb().store(key, ls);
throw new IllegalArgumentException("(dbid: " + _facade._dbid
+ ") Peer attempted to store local leaseSet: "
+ key.toBase32());
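
The hand-rolled freshness and lease-date comparison against the old multihomedCache is replaced by a plain store() into the multihome sub-database. A sketch of the new path, assuming store() behaves as declared in KademliaNetworkDatabaseFacade, where validation happens inside store() and an invalid or expired entry is rejected with IllegalArgumentException; the wrapper class is illustrative only:

```java
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.RouterContext;

// Illustrative wrapper, not part of the diff: hand the LeaseSet to the
// multihome sub-database and let its own validation decide whether to keep it.
class MultihomeStoreSketch {
    static void remember(RouterContext ctx, Hash key, LeaseSet ls) {
        try {
            ctx.multihomeNetDb().store(key, ls);
        } catch (IllegalArgumentException iae) {
            // rejected by the netDb's own checks (e.g. expired or invalid)
        }
    }
}
```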

KademliaNetworkDatabaseFacade.java

@@ -174,14 +174,18 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
public KademliaNetworkDatabaseFacade(RouterContext context, String dbid) {
_context = context;
_dbid = dbid;
_log = _context.logManager().getLog(getClass());
_networkID = context.router().getNetworkID();
_peerSelector = createPeerSelector();
_publishingLeaseSets = new HashMap<Hash, RepublishLeaseSetJob>(8);
_activeRequests = new HashMap<Hash, SearchJob>(8);
_reseedChecker = new ReseedChecker(context);
if (isClientDb())
_reseedChecker = null;
else
_reseedChecker = new ReseedChecker(context);
_blindCache = new BlindCache(context);
_dbid = dbid;
_localKey = null;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Created KademliaNetworkDatabaseFacade for id: " + dbid);
@@ -214,6 +218,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
/** @since 0.9 */
@Override
public ReseedChecker reseedChecker() {
if (isClientDb())
return null;
return _reseedChecker;
}
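
Since reseedChecker() can now return null for client sub-databases (which, after the DBID rename above, include the multihome and exploratory databases), callers outside the main-DB path need a null guard. A small illustrative sketch; the helper class is hypothetical:

```java
import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade;
import net.i2p.router.networkdb.reseed.ReseedChecker;

// Illustrative null-safe caller, not part of the diff.
class ReseedGuardSketch {
    static boolean maybeReseed(KademliaNetworkDatabaseFacade facade, int routerCount) {
        ReseedChecker rc = facade.reseedChecker();
        return rc != null && rc.checkReseed(routerCount);
    }
}
```
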
@@ -304,6 +310,10 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
return _dbid.startsWith("clients_");
}
public boolean isMultihomeDb() {
return _dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID);
}
public synchronized void startup() {
_log.info("Starting up the kademlia network database");
RouterInfo ri = _context.router().getRouterInfo();

PersistentDataStore.java

@@ -483,6 +483,10 @@ public class PersistentDataStore extends TransientDataStore {
_lastReseed = _context.clock().now();
_setNetDbReady = true;
setNetDbReady();
} else if (_facade.isMultihomeDb()) {
_lastReseed = _context.clock().now();
_setNetDbReady = true;
setNetDbReady();
} else if (_facade.reseedChecker().checkReseed(routerCount)) {
_lastReseed = _context.clock().now();
// checkReseed will call wakeup() when done and we will run again
@@ -494,6 +498,8 @@ public class PersistentDataStore extends TransientDataStore {
int count = Math.min(routerCount, size());
if (_facade.isClientDb()) {
_lastReseed = _context.clock().now();
} else if (_facade.isMultihomeDb()) {
_lastReseed = _context.clock().now();
} else if (count < MIN_ROUTERS) {
if (_facade.reseedChecker().checkReseed(count))
_lastReseed = _context.clock().now();