forked from I2P_Developers/i2p.i2p
Compare commits: i2p.i2p.2. ... i2p.i2p.2. (29 commits)
SHA1
---
564e2101e7
5c5cd8e909
5b8f0d41f1
d1d1ee157f
fc447453f0
386b25c3b6
bda12fb627
678c3db2f2
42c83194e2
6f8460607a
fa7b313134
e26754f9ed
e6c74fb494
8805bf0944
c2e8a9716e
e912f20ba6
61abf49f3d
646d2623bf
12c1c9459d
f23ac402ba
13bcb7fcd5
f6c6fb8bbb
9eac400261
14f4323889
cc5e3e94d0
4cbaad5e50
80cc0bb1ce
d3e71f6f70
9de1dd46e1
@@ -263,12 +263,20 @@ public abstract class DatabaseEntry extends DataStructureImpl {
      *
      * @since 0.9.58 moved up from LeaseSet
      */
-    public boolean getReceivedAsPublished() { return _receivedAsPublished; }
+    public boolean getReceivedAsPublished() {
+        return _receivedAsPublished;
+    }

     /**
      * @since 0.9.58 moved up from LeaseSet
+     *
+     * Use this carefully: when updating the flags, make sure the old and new
+     * leaseSets are actually equivalent, or simply copy over the reply value;
+     * see KademliaNetworkDatabaseFacade.java line 997 for more information.
      */
-    public void setReceivedAsPublished(boolean received) { _receivedAsPublished = received; }
+    public void setReceivedAsPublished() {
+        _receivedAsPublished = true;
+    }

     /**
      * If true, we received this LeaseSet by searching for it
@@ -276,12 +284,16 @@ public abstract class DatabaseEntry extends DataStructureImpl {
      *
      * @since 0.7.14, moved up from LeaseSet in 0.9.58
      */
-    public boolean getReceivedAsReply() { return _receivedAsReply; }
+    public boolean getReceivedAsReply() {
+        return _receivedAsReply;
+    }

     /**
      * set to true
      *
      * @since 0.7.14, moved up from LeaseSet in 0.9.58
      */
-    public void setReceivedAsReply() { _receivedAsReply = true; }
+    public void setReceivedAsReply() {
+        _receivedAsReply = true;
+    }
 }
@@ -1,3 +1,11 @@
+2023-06-28 idk
+ * Cache stores of multihomed leaseSets when stored from multihome peers,
+   and if our local leaseSet is not in the keyspace, return the multihome instead
+ * When updating a leaseSet after receiving it as published, always make a
+   complete copy of the leaseSet before merging the flags.
+ * Rate-limit lookups
+ * I2P 2.3.0
+
 2023-05-29 idk
  * adds "virtual contexts" to the bloom filter, where each entity that
    passes an i2np message to the bloom filter XORs the messageID with a random, local value.
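The 2023-05-29 entry is terse, so a sketch may help: the idea is that a shared duplicate-detection filter gets one random mask per caller, so a message ID recorded through one path cannot be used to make the filter reject the same ID arriving through another. A minimal illustration, with a HashSet standing in for the real bloom filter and all names hypothetical:

```java
import java.security.SecureRandom;
import java.util.HashSet;
import java.util.Set;

// Each "virtual context" XORs message IDs with its own random, local mask
// before touching the shared filter, so identical IDs seen through different
// contexts map to different keys and cannot collide across contexts.
class VirtualContextFilter {
    private final Set<Long> seen = new HashSet<>(); // stand-in for the bloom filter
    private final long contextMask;                 // random, local, per-context value

    VirtualContextFilter(SecureRandom random) {
        this.contextMask = random.nextLong();
    }

    /** Returns true if this message ID was already seen in THIS context. */
    boolean addAndTest(long messageId) {
        return !seen.add(messageId ^ contextMask);
    }
}
```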
@@ -31,6 +31,10 @@ public class ClientMessagePool {
         _cache = new OutboundCache(_context);
         OutboundClientMessageOneShotJob.init(_context);
     }

+    public OutboundCache getCache() {
+        return _cache;
+    }
+
     /**
      * @since 0.8.8
@@ -95,6 +95,15 @@ public class OutboundCache {
      */
     final Map<HashPair, Long> lastReplyRequestCache = new ConcurrentHashMap<HashPair, Long>(64);

+    /**
+     * This cache is used to keep track of when we receive a leaseSet from a router
+     * we are multihomed with, or otherwise are asked to store a valid routerInfo for
+     * a destination which we also host.
+     */
+    public final ConcurrentHashMap<Hash, LeaseSet> multihomedCache = new ConcurrentHashMap<Hash, LeaseSet>(64);
+
     private final RouterContext _context;

     private static final int CLEAN_INTERVAL = 5*60*1000;
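The hunks below use this cache with a consistent discipline: on store, keep only the entry with the newer lease date; on lookup, evict and ignore entries that are no longer current. A minimal sketch of that discipline, assuming a hypothetical Entry type in place of LeaseSet:

```java
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the multihomedCache discipline; Entry is a hypothetical stand-in
// for LeaseSet (the real code compares earliest lease dates and checks
// currency against lease expiry).
class MultihomeCacheSketch<K> {
    static final class Entry {
        final long leaseDate;
        Entry(long d) { leaseDate = d; }
        boolean isCurrent(long fudge) {
            return leaseDate + fudge > System.currentTimeMillis();
        }
    }

    private final ConcurrentHashMap<K, Entry> cache = new ConcurrentHashMap<>(64);

    /** Store only if newer than what we already hold (the compareLeasesetDate check below). */
    void storeIfNewer(K key, Entry candidate) {
        cache.merge(key, candidate,
                (oldE, newE) -> oldE.leaseDate < newE.leaseDate ? newE : oldE);
    }

    /** Return a current entry, evicting it if expired (the lookup-side check below). */
    Entry getCurrent(K key, long fudge) {
        Entry e = cache.get(key);
        if (e == null || e.isCurrent(fudge))
            return e;
        cache.remove(key);
        return null;
    }
}
```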
@@ -147,14 +147,40 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                 // Only send it out if it is in our estimated keyspace.
                 // For this, we do NOT use their dontInclude list as it can't be trusted
                 // (i.e. it could mess up the closeness calculation)
+                LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
                 Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
                                                                                   CLOSENESS_THRESHOLD, null);
                 if (weAreClosest(closestHashes)) {
                     // It's in our keyspace, so give it to them
+                    // There is a slight chance that there is also a multihomed router in our cache at the
+                    // same time we are closest to our locally published leaseSet. That means there is a slight
+                    // chance an attacker can send a leaseSet as a store, which goes into the multihome cache, then
+                    // fetch back a locally-created, locally-published leaseSet. BUT, if we always publish a
+                    // multihomed leaseSet even if we are closest to the local one, we never send it out if a potential
+                    // multihome is found in the cache.
                     if (_log.shouldLog(Log.INFO))
                         _log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
                     getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
                     sendData(searchKey, ls, fromKey, toTunnel);
+                } else if (possibleMultihomed != null) {
+                    if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
+                        // If it's in the possibleMultihomed cache, then it was definitely stored to us, meaning it is effectively
+                        // always receivedAsPublished. No need to decide whether or not to answer the request like above; just
+                        // answer it so it doesn't look different from other stores.
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
+                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
+                        sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
+                    } else {
+                        // If it expired, remove it from the cache.
+                        getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
+                        // Lie, pretend we don't have it
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
+                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
+                        Set<Hash> routerHashSet = getNearestRouters(lookupType);
+                        sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                    }
                 } else {
                     // Lie, pretend we don't have it
                     if (_log.shouldLog(Log.INFO))
@@ -164,17 +190,44 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                     sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
                 }
             } else {
-                // It was not published to us (we looked it up, for example)
-                // or it's local and we aren't floodfill,
-                // or it's local and we don't publish it.
-                // Lie, pretend we don't have it
-                if (_log.shouldLog(Log.INFO))
-                    _log.info("We have LS " + searchKey +
-                              ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
-                              " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
-                getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
-                Set<Hash> routerHashSet = getNearestRouters(lookupType);
-                sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
+                if (possibleMultihomed != null) {
+                    if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
+                        // If it's in the possibleMultihomed cache, then it was definitely stored to us, meaning it is effectively
+                        // always receivedAsPublished. No need to decide whether or not to answer the request like above; just
+                        // answer it so it doesn't look different from other stores.
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
+                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
+                        sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
+                    } else {
+                        // If it expired, remove it from the cache.
+                        getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
+                        // It was not published to us (we looked it up, for example)
+                        // or it's local and we aren't floodfill,
+                        // or it's local and we don't publish it.
+                        // Lie, pretend we don't have it
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("We have LS " + searchKey +
+                                      ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
+                                      " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
+                        getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
+                        Set<Hash> routerHashSet = getNearestRouters(lookupType);
+                        sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                    }
+                } else {
+                    // It was not published to us (we looked it up, for example)
+                    // or it's local and we aren't floodfill,
+                    // or it's local and we don't publish it.
+                    // Lie, pretend we don't have it
+                    if (_log.shouldLog(Log.INFO))
+                        _log.info("We have LS " + searchKey +
+                                  ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
+                                  " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
+                    getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
+                    Set<Hash> routerHashSet = getNearestRouters(lookupType);
+                    sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
+                }
             }
         } else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
                    lookupType != DatabaseLookupMessage.Type.LS) {
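Taken together, the two hunks above reduce to a three-way decision for a leaseSet lookup, sketched below with illustrative names (not the I2P API):

```java
// Compressed view of the branch order in HandleDatabaseLookupMessageJob:
// answer with the local entry when we are closest, answer with a current
// multihome entry when one was stored to us, otherwise refer the requester
// to closer peers as if we did not have the entry at all.
final class LookupDecisionSketch {
    enum Answer { SEND_LOCAL, SEND_MULTIHOME, SEND_CLOSEST_PEERS }

    static Answer decide(boolean weAreClosest, boolean haveCurrentMultihomeEntry) {
        if (weAreClosest)
            return Answer.SEND_LOCAL;           // in our keyspace: answer
        if (haveCurrentMultihomeEntry)
            return Answer.SEND_MULTIHOME;       // stored to us: answer the same way
        return Answer.SEND_CLOSEST_PEERS;       // otherwise lie and refer onward
    }
}
```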
@@ -48,14 +48,39 @@ public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder
         _context.statManager().addRateData("netDb.lookupsReceived", 1);

         DatabaseLookupMessage dlm = (DatabaseLookupMessage)receivedMessage;
-        if (!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {

+        if (_facade.shouldBanLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
+            if (_log.shouldLog(Log.WARN)) {
+                _log.warn("Possibly throttling " + dlm.getSearchType() + " lookup request for " + dlm.getSearchKey() + " because requests are being sent extremely fast, reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
+                _context.statManager().addRateData("netDb.repeatedLookupsDropped", 1);
+            }
+            /*
+             * We don't do this yet, but we do ban routers who do much faster bursts of lookups
+             * _context.banlist().banlistRouter(dlm.getFrom(), " <b>➜</b> Excessive lookup requests", null, null, _context.clock().now() + 4*60*60*1000);
+             * _context.commSystem().mayDisconnect(dlm.getFrom());
+             * _context.statManager().addRateData("netDb.lookupsDropped", 1);
+             * return null;
+             */
+        }
+        if (_facade.shouldBanBurstLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
+            if (_log.shouldLog(Log.WARN)) {
+                _log.warn("Banning " + dlm.getSearchType() + " lookup request for " + dlm.getSearchKey() + " because requests are being sent extremely fast in a very short time, reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
+                _context.statManager().addRateData("netDb.repeatedBurstLookupsDropped", 1);
+            }
+            _context.banlist().banlistRouter(dlm.getFrom(), " <b>➜</b> Excessive lookup requests, burst", null, null, _context.clock().now() + 4*60*60*1000);
+            _context.commSystem().mayDisconnect(dlm.getFrom());
+            _context.statManager().addRateData("netDb.lookupsDropped", 1);
+            return null;
+        }
+        if ((!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel()) && !_facade.shouldThrottleBurstLookup(dlm.getFrom(), dlm.getReplyTunnel()))
+            || _context.routerHash().equals(dlm.getFrom())) {
             Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash, _msgIDBloomXor);
             //if (false) {
             //    // might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
             //    j.runJob();
             //    return null;
             //} else {
                 return j;
             //}
         } else {
             if (_log.shouldLog(Log.WARN))
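Simplified sketch (not the I2P API) of the gate this handler now applies to an incoming lookup. Note the order: the burst-ban check can banlist the sender outright, the plain "ban" check currently only logs a warning, and the router's own lookups bypass both throttles:

```java
final class LookupGateSketch {
    enum Action { BAN_AND_DROP, HANDLE, DROP }

    static Action gate(boolean burstBanHit, boolean throttled,
                       boolean burstThrottled, boolean fromOurself) {
        if (burstBanHit)
            return Action.BAN_AND_DROP;             // banlist + disconnect + drop
        if ((!throttled && !burstThrottled) || fromOurself)
            return Action.HANDLE;                   // queue the handler job
        return Action.DROP;                         // drop, recording a stat
    }
}
```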
@@ -39,7 +39,16 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
     private final Set<Hash> _verifiesInProgress;
     private FloodThrottler _floodThrottler;
     private LookupThrottler _lookupThrottler;
+    private LookupThrottler _lookupThrottlerBurst;
+    private LookupThrottler _lookupBanner;
+    private LookupThrottler _lookupBannerBurst;
     private final Job _ffMonitor;
+    private final int BAN_LOOKUP_BASE = 50;
+    private final int BAN_LOOKUP_BASE_INTERVAL = 5*60*1000;
+    private final int BAN_LOOKUP_BURST = 10;
+    private final int BAN_LOOKUP_BURST_INTERVAL = 15*1000;
+    private final int DROP_LOOKUP_BURST = 10;
+    private final int DROP_LOOKUP_BURST_INTERVAL = 30*1000;

     /**
      * This is the flood redundancy. Entries are
@@ -84,6 +93,9 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         super.startup();
         _context.jobQueue().addJob(_ffMonitor);
         _lookupThrottler = new LookupThrottler();
+        _lookupBanner = new LookupThrottler(BAN_LOOKUP_BASE, BAN_LOOKUP_BASE_INTERVAL);
+        _lookupThrottlerBurst = new LookupThrottler(DROP_LOOKUP_BURST, DROP_LOOKUP_BURST_INTERVAL);
+        _lookupBannerBurst = new LookupThrottler(BAN_LOOKUP_BURST, BAN_LOOKUP_BURST_INTERVAL);

         boolean isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
         long down = _context.router().getEstimatedDowntime();
@@ -180,14 +192,38 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
         // perhaps statistically adjust this so we are the source every 1/N times... or something.
         if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
-            flood(ds);
-            if (onSuccess != null)
-                _context.jobQueue().addJob(onSuccess);
+            //if (!chanceOfFloodingOurOwn(-1)) {
+                flood(ds);
+                if (onSuccess != null)
+                    _context.jobQueue().addJob(onSuccess);
+            //} else {
+            //    _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
+            //} Less sure I should do this this time around. TODO: figure out how this should adjust
         } else {
             _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
         }
     }

+    /* TODO: figure out how this should work
+    private boolean chanceOfFloodingOurOwn(int percent) {
+        if (percent < 0) {
+            // make percent equal to 1-peer.failedLookupRate by retrieving it from the stats
+            RateStat percentRate = _context.statManager().getRate("netDb.failedLookupRate");
+            if (percentRate != null)
+                percent = (1-(int)percentRate.getLifetimeAverageValue())*100;
+            else {
+                _log.warn("chanceOfFloodingOurOwn() could not find netDb.failedLookupRate");
+                return false;
+            }
+        }
+        // if the router has been up for at least an hour
+        if (_context.router().getUptime() > 60*60*1000) {
+            // then, percent% of the time, return true
+            return Math.random() < (percent / 100.0f);
+        }
+        return false;
+    }*/
+
     /**
      * Increments and tests.
      * @since 0.7.11
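One thing to note about the commented-out draft above: `(1-(int)percentRate.getLifetimeAverageValue())*100` casts the average to int before subtracting, so any fractional rate truncates and percent collapses to 0 or 100. A sketch of the same computation kept in floating point, assuming the stat reports a failure rate in [0, 1]:

```java
import java.util.concurrent.ThreadLocalRandom;

final class FloodChanceSketch {
    /**
     * Returns true with probability (1 - failedLookupRate).
     * Keeping the subtraction in double avoids the truncation in the draft,
     * where the (int) cast collapses any fractional rate before scaling.
     */
    static boolean chanceOfFloodingOurOwn(double failedLookupRate) {
        double percent = (1.0 - failedLookupRate) * 100.0; // e.g. 0.25 -> 75.0
        return ThreadLocalRandom.current().nextDouble() < percent / 100.0;
    }
}
```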
@@ -205,6 +241,21 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         return _lookupThrottler == null || _lookupThrottler.shouldThrottle(from, id);
     }

+    boolean shouldBanLookup(Hash from, TunnelId id) {
+        // null before startup
+        return _lookupBanner == null || _lookupBanner.shouldThrottle(from, id);
+    }
+
+    boolean shouldThrottleBurstLookup(Hash from, TunnelId id) {
+        // null before startup
+        return _lookupThrottlerBurst == null || _lookupThrottlerBurst.shouldThrottle(from, id);
+    }
+
+    boolean shouldBanBurstLookup(Hash from, TunnelId id) {
+        // null before startup
+        return _lookupBannerBurst == null || _lookupBannerBurst.shouldThrottle(from, id);
+    }
+
     /**
      * If we are floodfill AND the key is not throttled,
      * flood it, otherwise don't.
@@ -31,6 +31,7 @@ import net.i2p.router.OutNetMessage;
 import net.i2p.router.Router;
 import net.i2p.router.RouterContext;
 import net.i2p.router.TunnelInfo;
+import net.i2p.router.message.OutboundCache;
 import net.i2p.router.message.SendMessageDirectJob;
 import net.i2p.util.Log;
 import net.i2p.util.SystemVersion;
@@ -90,14 +91,24 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             // somebody has our keys...
             // This could happen with multihoming - where it's really important to prevent
             // storing the other guy's leaseset, it will confuse us badly.
+            LeaseSet ls = (LeaseSet) entry;
             if (getContext().clientManager().isLocal(key)) {
-                //getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
+                getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
                 // throw rather than return, so that we send the ack below (prevent easy attack)
                 dontBlamePeer = true;
+                // store the peer in the outboundCache instead so that we can reply back with it without confusing ourselves.
+                if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
+                    if (_facade.validate(key, ls) == null) {
+                        LeaseSet compareLeasesetDate = getContext().clientMessagePool().getCache().multihomedCache.get(key);
+                        if (compareLeasesetDate == null)
+                            getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
+                        else if (compareLeasesetDate.getEarliestLeaseDate() < ls.getEarliestLeaseDate())
+                            getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
+                    }
+                }
                 throw new IllegalArgumentException("Peer attempted to store local leaseSet: " +
                                                    key.toBase32());
             }
-            LeaseSet ls = (LeaseSet) entry;
             //boolean oldrar = ls.getReceivedAsReply();
             //boolean oldrap = ls.getReceivedAsPublished();
             // If this was received as a response to a query,
@@ -109,7 +120,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             // receive in response to our own lookups.
             // See ../HDLMJ for more info
             if (!ls.getReceivedAsReply())
-                ls.setReceivedAsPublished(true);
+                ls.setReceivedAsPublished();
             //boolean rap = ls.getReceivedAsPublished();
             //if (_log.shouldLog(Log.INFO))
             //    _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap);
@@ -162,9 +173,9 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             if (_message.getReceivedAsReply()) {
                 ri.setReceivedAsReply();
                 if (_message.getReplyToken() > 0)
-                    ri.setReceivedAsPublished(true);
+                    ri.setReceivedAsPublished();
             } else {
-                ri.setReceivedAsPublished(true);
+                ri.setReceivedAsPublished();
             }
         }
         if (_log.shouldInfo()) {
@@ -889,7 +889,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
      * @throws UnsupportedCryptoException if that's why it failed.
      * @return reason why the entry is not valid, or null if it is valid
      */
-    private String validate(Hash key, LeaseSet leaseSet) throws UnsupportedCryptoException {
+    public String validate(Hash key, LeaseSet leaseSet) throws UnsupportedCryptoException {
         if (!key.equals(leaseSet.getHash())) {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("Invalid store attempt! key does not match leaseSet.destination! key = "
@@ -981,18 +981,31 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
             if (rv != null && rv.getEarliestLeaseDate() >= leaseSet.getEarliestLeaseDate()) {
                 if (_log.shouldDebug())
                     _log.debug("Not storing older " + key);
                 // if it hasn't changed, no need to do anything
-                // except copy over the flags
-                Hash to = leaseSet.getReceivedBy();
-                if (to != null) {
-                    rv.setReceivedBy(to);
-                } else if (leaseSet.getReceivedAsReply()) {
-                    rv.setReceivedAsReply();
-                }
-                if (leaseSet.getReceivedAsPublished()) {
-                    rv.setReceivedAsPublished(true);
-                }
-                return rv;
+                // TODO: Determine if this deep equals is actually truly necessary as part of this test, or if the date is enough
+                if (rv.equals(leaseSet)) {
+                    if (_log.shouldDebug())
+                        _log.debug("Updating leaseSet found in Datastore " + key);
+                    /** - DatabaseEntry.java note
+                     * We used to just copy the flags here, but due to concerns about crafted
+                     * entries being used to "follow" a leaseSet from one context to another
+                     * (i.e. sent to a client vs. sent to a router), we now replace the whole
+                     * entry. Copying the entire leaseSet, flags and all, limits the ability of
+                     * an attacker to craft leaseSet entries maliciously.
+                     */
+                    _ds.put(key, leaseSet);
+                    rv = (LeaseSet)_ds.get(key);
+                    Hash to = leaseSet.getReceivedBy();
+                    if (to != null) {
+                        rv.setReceivedBy(to);
+                    } else if (leaseSet.getReceivedAsReply()) {
+                        rv.setReceivedAsReply();
+                    }
+                    if (leaseSet.getReceivedAsPublished()) {
+                        rv.setReceivedAsPublished();
+                    }
+                    return rv;
+                }
+                // TODO: Is there any reason to do anything here, if the fields are somehow unequal?
+                // Like, is there any case where this is not true? I don't think it's possible for it to be.
             }
         } catch (ClassCastException cce) {
             throw new IllegalArgumentException("Attempt to replace RI with " + leaseSet);
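The re-applied flags above follow a fixed precedence: a non-null receivedBy wins over receivedAsReply, and receivedAsPublished is only ever set, never cleared. A minimal sketch of just that merge rule, with a hypothetical Entry standing in for LeaseSet:

```java
// Hypothetical stand-in for the LeaseSet flag fields referenced above.
final class Entry {
    Object receivedBy;             // a Hash in the real code
    boolean receivedAsReply;
    boolean receivedAsPublished;
}

final class FlagMergeSketch {
    /** Carry local bookkeeping flags from the incoming entry onto the stored copy. */
    static void mergeFlags(Entry stored, Entry incoming) {
        if (incoming.receivedBy != null)
            stored.receivedBy = incoming.receivedBy;   // receivedBy takes precedence
        else if (incoming.receivedAsReply)
            stored.receivedAsReply = true;             // otherwise propagate RAR
        if (incoming.receivedAsPublished)
            stored.receivedAsPublished = true;         // RAP is OR'd in, never cleared
    }
}
```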
@@ -19,11 +19,18 @@ class LookupThrottler {
     private final ObjectCounter<ReplyTunnel> counter;
     /** the id of this is -1 */
     private static final TunnelId DUMMY_ID = new TunnelId();
-    /** this seems like plenty */
-    private static final int MAX_LOOKUPS = 30;
-    private static final long CLEAN_TIME = 3*60*1000;

+    /** 30 seems like plenty, possibly too many; maybe dial this down again next release (2.4.0) */
+    private final int MAX_LOOKUPS; // DEFAULT = 20
+    private final long CLEAN_TIME; // DEFAULT = 3*60*1000
+
     LookupThrottler() {
+        MAX_LOOKUPS = 20;
+        CLEAN_TIME = 3*60*1000;
         this.counter = new ObjectCounter<ReplyTunnel>();
         SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
     }
+
+    LookupThrottler(int maxlookups, long cleanTime) {
+        MAX_LOOKUPS = maxlookups;
+        CLEAN_TIME = cleanTime;
+        this.counter = new ObjectCounter<ReplyTunnel>();
+        SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+    }
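All four throttlers instantiated in FloodfillNetworkDatabaseFacade.startup() above are the same count-per-window mechanism with different constants. A self-contained sketch of that mechanism (the real class counts per (source hash, reply tunnel) pair via ObjectCounter and clears on a SimpleTimer2 event):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Windowed counting as in LookupThrottler: increment a per-source counter
// and throttle once it exceeds the limit; clear() starts a fresh window.
final class WindowedThrottle<K> {
    private final int maxPerWindow;
    private final Map<K, Integer> counts = new ConcurrentHashMap<>();

    WindowedThrottle(int maxPerWindow) { this.maxPerWindow = maxPerWindow; }

    /** Increments and tests, like shouldThrottle(). */
    boolean shouldThrottle(K source) {
        return counts.merge(source, 1, Integer::sum) > maxPerWindow;
    }

    /** Called every CLEAN_TIME by the periodic Cleaner in the real code. */
    void clear() { counts.clear(); }
}
```

With the constants from this changeset, that yields: throttle at 20 lookups per 3 minutes (default constructor), burst-throttle at 10 per 30 seconds, warn-level ban at 50 per 5 minutes, and burst ban at 10 per 15 seconds.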