Compare commits


35 Commits

Author SHA1 Message Date
idk
564e2101e7 validate multihomed RIs before putting them into the cache 2023-06-29 07:51:03 -04:00
idk
5c5cd8e909 check the possibleMultihomed cache when we are not a floodfill too 2023-06-29 01:28:37 -04:00
idk
5b8f0d41f1 check the possibleMultihomed cache when we are not a floodfill too 2023-06-29 01:22:18 -04:00
idk
d1d1ee157f else clause 2023-06-29 00:48:34 -04:00
idk
fc447453f0 update history 2023-06-29 00:39:03 -04:00
idk
386b25c3b6 validate dates on leasesets before caching them and after serving them 2023-06-29 00:36:17 -04:00
idk
bda12fb627 remove entries from possibleMultihomed when they expire 2023-06-29 00:18:06 -04:00
idk
678c3db2f2 remove entries from possibleMultihomed when they expire 2023-06-29 00:11:49 -04:00
idk
42c83194e2 fix comment 2023-06-28 23:40:46 -04:00
idk
6f8460607a make a note of the remaining window 2023-06-28 23:39:56 -04:00
idk
fa7b313134 serve leaseSet out of possibleMultihomed if we are not closest in the keyspace 2023-06-28 23:21:12 -04:00
idk
e26754f9ed do the flag updates after the overwrite 2023-06-28 16:16:48 -04:00
idk
e6c74fb494 It would seem that it is not a reliable safety check to prevent setReceivedAsReply
based on the value of _receivedAsPublished; safety checks are possible here, but only
after the segmented netDb.
2023-06-28 16:12:36 -04:00
idk
8805bf0944 small tweak to how stores work 2023-06-28 15:12:21 -04:00
idk
c2e8a9716e update logging messages 2023-06-28 12:35:28 -04:00
idk
e912f20ba6 adjust the timers of the lookup throttlers 2023-06-28 12:32:27 -04:00
idk
61abf49f3d fix typo, deduplicate throttler code, log potentially-bannable bursts earlier than we handle the dlm 2023-06-28 11:55:26 -04:00
idk
646d2623bf in HandleDatabaseLookupMessage always treat LeaseSets stored in the multihomeCache as if they were published to us (because they were) when responding as a floodfill 2023-06-28 10:02:20 -04:00
idk
12c1c9459d don't call the function just check the var 2023-06-27 18:09:02 -04:00
idk
f23ac402ba use answerAllQueries instead of shouldPublishLocal 2023-06-27 17:38:44 -04:00
idk
13bcb7fcd5 add infrastructure to ban lookups when many lookups are sent very rapidly 2023-06-27 17:19:19 -04:00
idk
f6c6fb8bbb re-disable the flood chance of our own RI because I'm not sure of what I thought before.
put possible multihomes or potential attack leaseSets onto a list in the OutboundCache and make the OutboundCache accessible from the clientMessagePool so that it can be grabbed from the context.
then, when it's time to send it, behave as if it were stored in our netDb by calling it back out of the OutboundCache.
It's pretty ugly but it will work.
Fix indentation in KademliaNetworkDatabaseFacade.
2023-06-27 17:05:33 -04:00
idk
9eac400261 move context checks for get/set receivedAs Reply/Published into DatabaseEntry and make them mutually exclusive 2023-06-27 09:58:15 -04:00
idk
14f4323889 Merge branch 'master' of i2pgit.org:i2p-hackers/i2p.i2p into i2p.i2p.2.2.1-always-copy-full-leaseSet-when-updating 2023-06-27 08:25:30 -04:00
idk
eb0c8aaaa9 correct the error in the previous checkin 2023-06-26 16:44:37 -04:00
idk
cc5e3e94d0 slide the chanceOfFloodingOurOwn with our failedLookupRate 2023-06-26 16:37:34 -04:00
idk
4cbaad5e50 Sometimes answer a query for our own leaseset from ourselves based on a percent chance for now. Right now we answer it 30% of the time 2023-06-26 16:18:41 -04:00
idk
80cc0bb1ce Merge branch 'master' of i2pgit.org:i2p-hackers/i2p.i2p into i2p.i2p.2.2.1-always-copy-full-leaseSet-when-updating 2023-06-26 15:04:28 -04:00
idk
e55e6bc7a2 move singleIPv6BlockList null check in clear outside of the synchronized block 2023-06-26 14:18:48 -04:00
idk
0ac7d69212 add null checks around _singleIPv6Blocklist 2023-06-26 14:12:39 -04:00
idk
d3e71f6f70 some TODO comments 2023-06-24 17:06:35 -04:00
idk
9de1dd46e1 screen leaseSets before storing them in KademliaNetworkDatabaseFacade.store, do not update setReceivedAsPublished if getReceivedAsReply is already set 2023-06-24 06:00:14 -04:00
idk
ace62a1677 add notbob and ramble to the homepage, tooltips won't be translated for 2.3.0 but we'll live with that for now. 2023-06-23 17:51:21 -04:00
idk
95c29649bd Merge branch 'i2p.i2p.2.2.1-blocklist-expiration' into 'master'
add user-configurable expire-time to blocklists.

See merge request i2p-hackers/i2p.i2p!93
2023-06-19 00:05:03 +00:00
idk
912b534c31 add user-configurable expire-time to blocklists. 2023-06-19 00:05:02 +00:00
12 changed files with 347 additions and 56 deletions

View File

@@ -95,6 +95,8 @@ public class HomeHelper extends HelperBase {
//_x("The Tin Hat") + S + _x("Privacy guides and tutorials") + S + "http://secure.thetinhat.i2p/" + S + I + "thetinhat.png" + S +
//_x("Ugha's Wiki") + S + S + "http://ugha.i2p/" + S + I + "billiard_marker.png" + S +
//"sponge.i2p" + S + _x("Seedless and the Robert BitTorrent applications") + S + "http://sponge.i2p/" + S + I + "user_astronaut.png" + S +
"notbob.i2p" + S + _x("Not Bob's Address Servies") + S + "http://notbob.i2p/" + S + I + "notbob.png" + S +
"[Ramble]" + S + _x("Ramble user-moderated forum aggregator") + S + "http://ramble.i2p/" + S + I + "notbob.png" + S +
"";
// No commas allowed in text strings!

View File

@@ -263,12 +263,20 @@ public abstract class DatabaseEntry extends DataStructureImpl {
*
* @since 0.9.58 moved up from LeaseSet
*/
public boolean getReceivedAsPublished() { return _receivedAsPublished; }
public boolean getReceivedAsPublished() {
return _receivedAsPublished;
}
/**
* @since 0.9.58 moved up from LeaseSet
*
* Use this carefully: when updating the flags, make sure the old and new
* leaseSets are actually equivalent, or simply copy over the reply value;
* see KademliaNetworkDatabaseFacade.java line 997 for more information.
*/
public void setReceivedAsPublished(boolean received) { _receivedAsPublished = received; }
public void setReceivedAsPublished() {
_receivedAsPublished = true;
}
/**
* If true, we received this LeaseSet by searching for it
@@ -276,12 +284,16 @@ public abstract class DatabaseEntry extends DataStructureImpl {
*
* @since 0.7.14, moved up from LeaseSet in 0.9.58
*/
public boolean getReceivedAsReply() { return _receivedAsReply; }
public boolean getReceivedAsReply() {
return _receivedAsReply;
}
/**
* set to true
*
* @since 0.7.14, moved up from LeaseSet in 0.9.58
*/
public void setReceivedAsReply() { _receivedAsReply = true; }
public void setReceivedAsReply() {
_receivedAsReply = true;
}
}
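
The new setters can only raise their flag; nothing in the public API lowers one once set. A minimal, self-contained sketch of the resulting merge pattern (illustrative class, not the router's DatabaseEntry):

class FlagsSketch {
private boolean receivedAsPublished;
private boolean receivedAsReply;
boolean getReceivedAsPublished() { return receivedAsPublished; }
void setReceivedAsPublished() { receivedAsPublished = true; }
boolean getReceivedAsReply() { return receivedAsReply; }
void setReceivedAsReply() { receivedAsReply = true; }
public static void main(String[] args) {
FlagsSketch stored = new FlagsSketch();
FlagsSketch incoming = new FlagsSketch();
incoming.setReceivedAsPublished();
// merge: only raised flags are copied; a crafted entry can no longer clear a flag
if (incoming.getReceivedAsPublished())
stored.setReceivedAsPublished();
if (incoming.getReceivedAsReply())
stored.setReceivedAsReply();
System.out.println(stored.getReceivedAsPublished() + " " + stored.getReceivedAsReply()); // true false
}
}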

View File

@@ -1,3 +1,11 @@
2023-06-28 idk
* Cache stores of multihomed leaseSets when they are stored to us by multihome peers,
and if our local leaseSet is not closest in the keyspace, return the multihome instead
* When updating a leaseSet because we received it as published, always make a
complete copy of the leaseSet before merging the flags.
* Rate-limit lookups
* I2P 2.3.0
2023-05-29 idk
* adds "virtual contexts" to the bloom filter, where each entity that
passes an I2NP message to the bloom filter XORs the messageID with a random, local value.

View File

@@ -17,6 +17,7 @@ import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -89,12 +90,14 @@ public class Blocklist {
private final File _blocklistFeedFile;
private final boolean _haveIPv6;
private boolean _started;
private long _lastExpired = 0;
// temp
private final Map<Hash, String> _peerBlocklist = new HashMap<Hash, String>(4);
private static final String PROP_BLOCKLIST_ENABLED = "router.blocklist.enable";
private static final String PROP_BLOCKLIST_DETAIL = "router.blocklist.detail";
private static final String PROP_BLOCKLIST_FILE = "router.blocklist.file";
private static final String PROP_BLOCKLIST_EXPIRE_INTERVAL = "router.blocklist.expireInterval";
public static final String BLOCKLIST_FILE_DEFAULT = "blocklist.txt";
private static final String BLOCKLIST_FEED_FILE = "docs/feed/blocklist/blocklist.txt";
/** @since 0.9.48 */
@@ -147,6 +150,37 @@
_singleIPv6Blocklist = _haveIPv6 ? new LHMCache<BigInteger, Object>(MAX_IPV6_SINGLES) : null;
}
private int expireInterval() {
String expireIntervalValue = _context.getProperty(PROP_BLOCKLIST_EXPIRE_INTERVAL, "0");
try {
int expireIntervalInt = 0;
if (expireIntervalValue.endsWith("s")) {
expireIntervalValue = expireIntervalValue.substring(0, expireIntervalValue.length() - 1);
expireIntervalInt = Integer.parseInt(expireIntervalValue) * 1000;
} else if (expireIntervalValue.endsWith("m")) {
expireIntervalValue = expireIntervalValue.substring(0, expireIntervalValue.length() - 1);
expireIntervalInt = Integer.parseInt(expireIntervalValue) * 60000;
} else if (expireIntervalValue.endsWith("h")) {
expireIntervalValue = expireIntervalValue.substring(0, expireIntervalValue.length() - 1);
expireIntervalInt = Integer.parseInt(expireIntervalValue) * 3600000;
} else if (expireIntervalValue.endsWith("d")) {
expireIntervalValue = expireIntervalValue.substring(0, expireIntervalValue.length() - 1);
expireIntervalInt = Integer.parseInt(expireIntervalValue) * 86400000;
} else {
expireIntervalInt = Integer.parseInt(expireIntervalValue);
}
if (expireIntervalInt < 0)
expireIntervalInt = 0;
return expireIntervalInt;
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Format error in " + PROP_BLOCKLIST_EXPIRE_INTERVAL, nfe);
}
// if we don't have a valid value in this field, return 0, which is the same as disabling it
return 0;
}
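
For reference, expireInterval() accepts a bare millisecond count or a single s/m/h/d suffix. A hedged router.config example (values illustrative; the property name is the one defined above):

# bare value, taken as milliseconds (12 hours):
router.blocklist.expireInterval=43200000
# the same interval with a suffix, 12 * 3600000 ms:
router.blocklist.expireInterval=12h
# 0 (the default) disables expiration, so bans are permanent again:
router.blocklist.expireInterval=0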
/**
* Loads the following files in-order:
* $I2P/blocklist.txt
@@ -193,6 +227,11 @@
// but it's important to have this initialized before we read in the netdb.
//job.getTiming().setStartAfter(_context.clock().now() + 30*1000);
_context.jobQueue().addJob(job);
if (expireInterval() > 0) {
Job cleanupJob = new CleanupJob();
cleanupJob.getTiming().setStartAfter(_context.clock().now() + expireInterval());
_context.jobQueue().addJob(cleanupJob);
}
}
/**
@@ -232,6 +271,34 @@
}
}
}
private class CleanupJob extends JobImpl {
public CleanupJob() {
super(_context);
}
public String getName() {
return "Expire blocklist at user-defined interval of " + expireInterval();
}
public void runJob() {
clear();
_lastExpired = System.currentTimeMillis();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Expiring blocklist entries at " + _lastExpired);
// schedule the next one
super.requeue(expireInterval());
}
}
private void clear() {
synchronized(_singleIPBlocklist) {
_singleIPBlocklist.clear();
}
if (_singleIPv6Blocklist != null) {
synchronized(_singleIPv6Blocklist) {
_singleIPv6Blocklist.clear();
}
}
}
private class ReadinJob extends JobImpl {
private final List<BLFile> _files;
@@ -285,13 +352,20 @@
reason = _x("Banned by router hash: {0}");
else
reason = _x("Banned by router hash");
_context.banlist().banlistRouterForever(peer, reason, comment);
banlistRouter(peer, reason, comment);
}
_peerBlocklist.clear();
return count;
}
}
private void banlistRouter(Hash peer, String reason, String comment) {
if (expireInterval() > 0)
_context.banlist().banlistRouter(peer, reason, comment, null, expireInterval());
else
_context.banlist().banlistRouterForever(peer, reason, comment);
}
/**
* The blocklist-country.txt file was created or updated.
* Read it in. Not required normally, as the country file
@@ -822,9 +896,12 @@
* @since IPv6
*/
private boolean add(BigInteger ip) {
synchronized(_singleIPv6Blocklist) {
return _singleIPv6Blocklist.put(ip, DUMMY) == null;
if (_singleIPv6Blocklist != null) {
synchronized(_singleIPv6Blocklist) {
return _singleIPv6Blocklist.put(ip, DUMMY) == null;
}
}
return false;
}
/**
@@ -832,8 +909,10 @@
* @since 0.9.28
*/
private void remove(BigInteger ip) {
synchronized(_singleIPv6Blocklist) {
_singleIPv6Blocklist.remove(ip);
if (_singleIPv6Blocklist != null) {
synchronized(_singleIPv6Blocklist) {
_singleIPv6Blocklist.remove(ip);
}
}
}
@@ -842,9 +921,12 @@
* @since IPv6
*/
private boolean isOnSingleList(BigInteger ip) {
synchronized(_singleIPv6Blocklist) {
return _singleIPv6Blocklist.get(ip) != null;
if (_singleIPv6Blocklist != null) {
synchronized(_singleIPv6Blocklist) {
return _singleIPv6Blocklist.get(ip) != null;
}
}
return false;
}
/**
@@ -886,6 +968,9 @@
/**
* Does the peer's IP address appear in the blocklist?
* If so, and it isn't banlisted, banlist it forever...
* or, if the user configured an override, ban it for the
* override period.
* @since 0.9.29
*/
public boolean isBlocklisted(Hash peer) {
List<byte[]> ips = getAddresses(peer);
@@ -905,6 +990,8 @@
/**
* Does the peer's IP address appear in the blocklist?
* If so, and it isn't banlisted, banlist it forever...
* or, if the user configured an override, ban it for the
* override period.
* @since 0.9.29
*/
public boolean isBlocklisted(RouterInfo pinfo) {
@@ -1141,7 +1228,7 @@
_context.clock().now() + Banlist.BANLIST_DURATION_LOCALHOST);
return;
}
_context.banlist().banlistRouterForever(peer, reason, sip);
banlistRouter(peer, reason, sip);
if (! _context.getBooleanPropertyDefaultTrue(PROP_BLOCKLIST_DETAIL))
return;
boolean shouldRunJob;
@@ -1169,7 +1256,7 @@
}
public String getName() { return "Ban Peer by IP"; }
public void runJob() {
banlistForever(_peer, _ips);
banlistRouter(_peer, _ips, expireInterval());
synchronized (_inProcess) {
_inProcess.remove(_peer);
}
@@ -1185,7 +1272,13 @@
* So we also stagger these jobs.
*
*/
private synchronized void banlistForever(Hash peer, List<byte[]> ips) {
private void banlistRouter(Hash peer, String reason, String reasonCode, long duration) {
if (duration > 0)
_context.banlist().banlistRouter(peer, reason, reasonCode, null, _context.clock().now() + duration);
else
_context.banlist().banlistRouterForever(peer, reason, reasonCode);
}
private synchronized void banlistRouter(Hash peer, List<byte[]> ips, long duration) {
// This only checks one file for now, pick the best one
// user specified
File blFile = null;
@@ -1205,7 +1298,7 @@
// just ban it and be done
if (_log.shouldLog(Log.WARN))
_log.warn("Banlisting " + peer);
_context.banlist().banlistRouterForever(peer, "Banned");
banlistRouter(peer, "Banned", "Banned", expireInterval());
return;
}
@@ -1236,7 +1329,7 @@
//reason = reason + " banned by " + BLOCKLIST_FILE_DEFAULT + " entry \"" + buf + "\"";
if (_log.shouldLog(Log.WARN))
_log.warn("Banlisting " + peer + " " + reason);
_context.banlist().banlistRouterForever(peer, reason, buf.toString());
banlistRouter(peer, reason, buf.toString(), expireInterval());
return;
}
}
@@ -1277,9 +1370,12 @@
public List<BigInteger> getTransientIPv6Blocks() {
if (!_haveIPv6)
return Collections.<BigInteger>emptyList();
synchronized(_singleIPv6Blocklist) {
return new ArrayList<BigInteger>(_singleIPv6Blocklist.keySet());
if (_singleIPv6Blocklist != null) {
synchronized(_singleIPv6Blocklist) {
return new ArrayList<BigInteger>(_singleIPv6Blocklist.keySet());
}
}
return Collections.<BigInteger>emptyList();
}
/**

View File

@@ -31,6 +31,10 @@ public class ClientMessagePool {
_cache = new OutboundCache(_context);
OutboundClientMessageOneShotJob.init(_context);
}
public OutboundCache getCache() {
return _cache;
}
/**
* @since 0.8.8

View File

@@ -95,6 +95,15 @@ public class OutboundCache {
*/
final Map<HashPair, Long> lastReplyRequestCache = new ConcurrentHashMap<HashPair, Long>(64);
/**
* This cache is used to keep track of when we receive a leaseSet from a router
* we are multihomed with, or are otherwise asked to store a valid leaseSet for
* a destination which we also host.
*/
public final ConcurrentHashMap<Hash, LeaseSet> multihomedCache = new ConcurrentHashMap<Hash, LeaseSet>(64);
private final RouterContext _context;
private static final int CLEAN_INTERVAL = 5*60*1000;

View File

@@ -147,14 +147,40 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// Only send it out if it is in our estimated keyspace.
// For this, we do NOT use their dontInclude list as it can't be trusted
// (i.e. it could mess up the closeness calculation)
LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
CLOSENESS_THRESHOLD, null);
if (weAreClosest(closestHashes)) {
// It's in our keyspace, so give it to them
// there is a slight chance that there is also a multihomed router in our cache at the
// same time we are closest to our locally published leaseSet. That means there is a slight
// chance an attacker can send a leaseSet as a store which goes into the multihome cache, then
// fetch back a locally-created, locally-published leaseSet. BUT, if we always published a
// multihomed leaseSet even when we are closest to the local one, we would never send out the
// local leaseSet when a potential multihome is found in the cache.
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
sendData(searchKey, ls, fromKey, toTunnel);
} else if (possibleMultihomed != null) {
if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
// If it's in the possibleMultihomed cache, then it was definitely stored to us, meaning it is effectively
// always receivedAsPublished. No need to decide whether or not to answer the request like above, just
// answer it so it doesn't look different from other stores.
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
} else {
// if it expired, remove it from the cache.
getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
@@ -164,17 +190,44 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
LeaseSet possibleMultihomed = getContext().clientMessagePool().getCache().multihomedCache.get(searchKey);
if (possibleMultihomed != null) {
if (possibleMultihomed.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
// If it's in the possibleMultihomed cache, then it was definitely stored to us, meaning it is effectively
// always receivedAsPublished. No need to decide whether or not to answer the request like above, just
// answer it so it doesn't look different from other stores.
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
} else {
// if it expired, remove it from the cache.
getContext().clientMessagePool().getCache().multihomedCache.remove(searchKey);
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
}
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
lookupType != DatabaseLookupMessage.Type.LS) {
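
Both new branches above apply the same rule: a cached possible-multihome is served only while it is still current; otherwise it is evicted and the router answers with closest-peer references as if it held nothing. A minimal, self-contained model of that rule (names and types are illustrative stand-ins, not the router's API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class MultihomeLookupSketch {
// stand-in for the multihomedCache: searchKey -> lease expiration (ms since epoch)
static final Map<String, Long> cache = new ConcurrentHashMap<>();
// stand-in for Router.CLOCK_FUDGE_FACTOR / 4
static final long FUDGE = 60*1000;

// true: answer with the cached leaseSet; false: evict if expired, then send closest peers
static boolean answerFromMultihomeCache(String searchKey, long now) {
Long expires = cache.get(searchKey);
if (expires == null)
return false;
if (expires + FUDGE > now)
return true;
cache.remove(searchKey);
return false;
}

public static void main(String[] args) {
long now = System.currentTimeMillis();
cache.put("current", now + 10*60*1000);
cache.put("expired", now - 10*60*1000);
System.out.println(answerFromMultihomeCache("current", now)); // true
System.out.println(answerFromMultihomeCache("expired", now)); // false, and evicted
}
}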

View File

@@ -48,14 +48,39 @@ public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder
_context.statManager().addRateData("netDb.lookupsReceived", 1);
DatabaseLookupMessage dlm = (DatabaseLookupMessage)receivedMessage;
if (!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
if (_facade.shouldBanLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Possibly throttling " + dlm.getSearchType() + " lookup request for " + dlm.getSearchKey() + " because requests are being sent extremely fast, reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
_context.statManager().addRateData("netDb.repeatedLookupsDropped", 1);
/*
* We don't do this yet, but we do ban routers that send much faster bursts of lookups
* _context.banlist().banlistRouter(dlm.getFrom(), " <b>➜</b> Excessive lookup requests", null, null, _context.clock().now() + 4*60*60*1000);
* _context.commSystem().mayDisconnect(dlm.getFrom());
* _context.statManager().addRateData("netDb.lookupsDropped", 1);
* return null;
*/
}
if (_facade.shouldBanBurstLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Banning " + dlm.getSearchType() + " lookup request for " + dlm.getSearchKey() + " because requests are being sent extremely fast in a very short time, reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
_context.statManager().addRateData("netDb.repeatedBurstLookupsDropped", 1);
_context.banlist().banlistRouter(dlm.getFrom(), " <b>➜</b> Excessive lookup requests, burst", null, null, _context.clock().now() + 4*60*60*1000);
_context.commSystem().mayDisconnect(dlm.getFrom());
_context.statManager().addRateData("netDb.lookupsDropped", 1);
return null;
}
if ((!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel()) && !_facade.shouldThrottleBurstLookup(dlm.getFrom(), dlm.getReplyTunnel()))
|| _context.routerHash().equals(dlm.getFrom())) {
Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash, _msgIDBloomXor);
//if (false) {
// // might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
// j.runJob();
// return null;
//} else {
return j;
//}
} else {
if (_log.shouldLog(Log.WARN))

View File

@@ -39,7 +39,16 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
private final Set<Hash> _verifiesInProgress;
private FloodThrottler _floodThrottler;
private LookupThrottler _lookupThrottler;
private LookupThrottler _lookupThrottlerBurst;
private LookupThrottler _lookupBanner;
private LookupThrottler _lookupBannerBurst;
private final Job _ffMonitor;
private final int BAN_LOOKUP_BASE = 50;
private final int BAN_LOOKUP_BASE_INTERVAL = 5*60*1000;
private final int BAN_LOOKUP_BURST = 10;
private final int BAN_LOOKUP_BURST_INTERVAL = 15*1000;
private final int DROP_LOOKUP_BURST = 10;
private final int DROP_LOOKUP_BURST_INTERVAL = 30*1000;
/**
* This is the flood redundancy. Entries are
@@ -84,6 +93,9 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
super.startup();
_context.jobQueue().addJob(_ffMonitor);
_lookupThrottler = new LookupThrottler();
_lookupBanner = new LookupThrottler(BAN_LOOKUP_BASE, BAN_LOOKUP_BASE_INTERVAL);
_lookupThrottlerBurst = new LookupThrottler(DROP_LOOKUP_BURST, DROP_LOOKUP_BURST_INTERVAL);
_lookupBannerBurst = new LookupThrottler(BAN_LOOKUP_BURST, BAN_LOOKUP_BURST_INTERVAL);
boolean isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
long down = _context.router().getEstimatedDowntime();
@@ -180,14 +192,38 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
// of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
// perhaps statistically adjust this so we are the source every 1/N times... or something.
if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
flood(ds);
if (onSuccess != null)
_context.jobQueue().addJob(onSuccess);
//if (!chanceOfFloodingOurOwn(-1)) {
flood(ds);
if (onSuccess != null)
_context.jobQueue().addJob(onSuccess);
//} else {
// _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
//} Less sure I should do this this time around. TODO: figure out how this should adjust
} else {
_context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
}
}
/* TODO: figure out how this should work
private boolean chanceOfFloodingOurOwn(int percent) {
if (percent < 0) {
// make percent equal to 1-peer.failedLookupRate by retrieving it from the stats
RateStat percentRate = _context.statManager().getRate("netDb.failedLookupRate");
if (percentRate != null)
percent = (int) ((1 - percentRate.getLifetimeAverageValue()) * 100);
else {
_log.warn("chanceOfFloodingOurOwn() could not find netDb.failedLookupRate");
return false;
}
}
// if the router has been up for at least an hour
if (_context.router().getUptime() > 60*60*1000) {
// then return true percent% of the time
return Math.random() < (percent / 100.0f);
}
return false;
}*/
/**
* Increments and tests.
* @since 0.7.11
@@ -205,6 +241,21 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
return _lookupThrottler == null || _lookupThrottler.shouldThrottle(from, id);
}
boolean shouldBanLookup(Hash from, TunnelId id) {
// null before startup
return _lookupBanner == null || _lookupBanner.shouldThrottle(from, id);
}
boolean shouldThrottleBurstLookup(Hash from, TunnelId id) {
// null before startup
return _lookupThrottlerBurst == null || _lookupThrottlerBurst.shouldThrottle(from, id);
}
boolean shouldBanBurstLookup(Hash from, TunnelId id) {
// null before startup
return _lookupBannerBurst == null || _lookupBannerBurst.shouldThrottle(from, id);
}
/**
* If we are floodfill AND the key is not throttled,
* flood it, otherwise don't.
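
Taken together, the facade now layers four counters over one mechanism, differing only in threshold and window: drop at 20 lookups per 3 minutes (the default throttler), log ban candidates at 50 per 5 minutes, burst-drop at 10 per 30 seconds, and burst-ban at 10 per 15 seconds. A minimal, self-contained model of the counting scheme (illustrative; the real LookupThrottler counts per reply tunnel and resets via a periodic cleaner):

import java.util.HashMap;
import java.util.Map;

class ThrottleSketch {
private final int max; // lookups tolerated per window
private final Map<String, Integer> counts = new HashMap<>();

ThrottleSketch(int max) { this.max = max; }

// count and test; a real implementation clears the counts every window
synchronized boolean shouldThrottle(String from) {
return counts.merge(from, 1, Integer::sum) > max;
}

public static void main(String[] args) {
ThrottleSketch banBurst = new ThrottleSketch(10); // BAN_LOOKUP_BURST per 15s window
for (int i = 1; i <= 12; i++)
if (banBurst.shouldThrottle("peerA"))
System.out.println("lookup " + i + ": burst-ban threshold exceeded");
}
}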

View File

@@ -31,6 +31,7 @@ import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.OutboundCache;
import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.util.Log;
import net.i2p.util.SystemVersion;
@@ -90,14 +91,24 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// somebody has our keys...
// This could happen with multihoming - where it's really important to prevent
// storing the other guy's leaseset, it will confuse us badly.
LeaseSet ls = (LeaseSet) entry;
if (getContext().clientManager().isLocal(key)) {
//getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
// throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
// store the leaseSet in the OutboundCache instead so that we can reply with it later without confusing ourselves.
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR / 4)) {
if (_facade.validate(key, ls) == null) {
LeaseSet compareLeasesetDate = getContext().clientMessagePool().getCache().multihomedCache.get(key);
if (compareLeasesetDate == null)
getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
else if (compareLeasesetDate.getEarliestLeaseDate() < ls.getEarliestLeaseDate())
getContext().clientMessagePool().getCache().multihomedCache.put(key, ls);
}
}
throw new IllegalArgumentException("Peer attempted to store local leaseSet: " +
key.toBase32());
}
LeaseSet ls = (LeaseSet) entry;
//boolean oldrar = ls.getReceivedAsReply();
//boolean oldrap = ls.getReceivedAsPublished();
// If this was received as a response to a query,
@@ -109,7 +120,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// receive in response to our own lookups.
// See ../HDLMJ for more info
if (!ls.getReceivedAsReply())
ls.setReceivedAsPublished(true);
ls.setReceivedAsPublished();
//boolean rap = ls.getReceivedAsPublished();
//if (_log.shouldLog(Log.INFO))
// _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap);
@@ -162,9 +173,9 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (_message.getReceivedAsReply()) {
ri.setReceivedAsReply();
if (_message.getReplyToken() > 0)
ri.setReceivedAsPublished(true);
ri.setReceivedAsPublished();
} else {
ri.setReceivedAsPublished(true);
ri.setReceivedAsPublished();
}
}
if (_log.shouldInfo()) {
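
The store path above replaces a cached multihome candidate only when the incoming leaseSet validates and carries a later earliest-lease date. A condensed, self-contained model of that rule (illustrative types; the real code validates against the netDb and keys on the destination Hash):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class MultihomeStoreSketch {
record Lease(long earliestLeaseDate) { }

static final Map<String, Lease> multihomedCache = new ConcurrentHashMap<>();

// keep the first sighting, or anything strictly newer than what we hold
static void maybeCache(String key, Lease incoming) {
Lease prior = multihomedCache.get(key);
if (prior == null || prior.earliestLeaseDate() < incoming.earliestLeaseDate())
multihomedCache.put(key, incoming);
}

public static void main(String[] args) {
maybeCache("dest", new Lease(1000));
maybeCache("dest", new Lease(500)); // older: ignored
maybeCache("dest", new Lease(2000)); // newer: replaces
System.out.println(multihomedCache.get("dest").earliestLeaseDate()); // 2000
}
}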

View File

@@ -889,7 +889,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
* @throws UnsupportedCryptoException if that's why it failed.
* @return reason why the entry is not valid, or null if it is valid
*/
private String validate(Hash key, LeaseSet leaseSet) throws UnsupportedCryptoException {
public String validate(Hash key, LeaseSet leaseSet) throws UnsupportedCryptoException {
if (!key.equals(leaseSet.getHash())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid store attempt! key does not match leaseSet.destination! key = "
@@ -981,18 +981,31 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
if (rv != null && rv.getEarliestLeaseDate() >= leaseSet.getEarliestLeaseDate()) {
if (_log.shouldDebug())
_log.debug("Not storing older " + key);
// if it hasn't changed, no need to do anything
// except copy over the flags
Hash to = leaseSet.getReceivedBy();
if (to != null) {
rv.setReceivedBy(to);
} else if (leaseSet.getReceivedAsReply()) {
rv.setReceivedAsReply();
}
if (leaseSet.getReceivedAsPublished()) {
rv.setReceivedAsPublished(true);
}
return rv;
// TODO: Determine whether this deep equals is truly necessary for this test, or if the date check alone is enough
if (rv.equals(leaseSet)) {
if (_log.shouldDebug())
_log.debug("Updating leaseSet found in Datastore " + key);
/** - DatabaseEntry.java note
* We used to just copy the flags here, but due to concerns about crafted
* entries being used to "follow" a leaseSet from one context to another
* (i.e. sent to a client vs. sent to a router), we now copy the entire
* leaseSet. Copying everything, flags and all, limits the attacker's
* ability to craft leaseSet entries maliciously.
*/
_ds.put(key, leaseSet);
rv = (LeaseSet)_ds.get(key);
Hash to = leaseSet.getReceivedBy();
if (to != null) {
rv.setReceivedBy(to);
} else if (leaseSet.getReceivedAsReply()) {
rv.setReceivedAsReply();
}
if (leaseSet.getReceivedAsPublished()) {
rv.setReceivedAsPublished();
}
return rv;
}
// TODO: Is there any reason to do anything here if the fields are somehow unequal?
// Is there any case where that can happen? I don't think it's possible.
}
} catch (ClassCastException cce) {
throw new IllegalArgumentException("Attempt to replace RI with " + leaseSet);
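
The update path above now overwrites the stored entry with the complete incoming leaseSet and only afterwards re-raises the relevant flags, so nothing from the previously stored object survives into the new one. A compact, self-contained model of that overwrite-then-flag pattern (illustrative names):

import java.util.HashMap;
import java.util.Map;

class OverwriteSketch {
static class Entry {
final String payload; // stands in for the full leaseSet body
boolean receivedAsReply;
boolean receivedAsPublished;
Entry(String payload) { this.payload = payload; }
}

static final Map<String, Entry> ds = new HashMap<>();

// store a complete copy first, then raise only the incoming entry's flags on it
static Entry store(String key, Entry incoming) {
ds.put(key, new Entry(incoming.payload));
Entry stored = ds.get(key);
if (incoming.receivedAsReply)
stored.receivedAsReply = true;
if (incoming.receivedAsPublished)
stored.receivedAsPublished = true;
return stored;
}

public static void main(String[] args) {
Entry old = new Entry("old");
old.receivedAsReply = true;
ds.put("k", old);
Entry fresh = new Entry("new");
fresh.receivedAsPublished = true;
Entry rv = store("k", fresh);
// the old entry's reply flag does not follow the key through the overwrite
System.out.println(rv.payload + " RAP=" + rv.receivedAsPublished + " RAR=" + rv.receivedAsReply);
}
}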

View File

@@ -19,11 +19,18 @@ class LookupThrottler {
private final ObjectCounter<ReplyTunnel> counter;
/** the id of this is -1 */
private static final TunnelId DUMMY_ID = new TunnelId();
/** this seems like plenty */
private static final int MAX_LOOKUPS = 30;
private static final long CLEAN_TIME = 3*60*1000;
/** 30 seems like plenty, possibly too many; maybe dial this down again next release (2.4.0) */
private final int MAX_LOOKUPS; // DEFAULT=20
private final long CLEAN_TIME; // DEFAULT=3*60*1000
LookupThrottler() {
MAX_LOOKUPS = 20;
CLEAN_TIME = 3*60*1000;
this.counter = new ObjectCounter<ReplyTunnel>();
SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
LookupThrottler(int maxlookups, long cleanTime) {
MAX_LOOKUPS = maxlookups;
CLEAN_TIME = cleanTime;
this.counter = new ObjectCounter<ReplyTunnel>();
SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
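
Callers pick the policy through the new constructor; the facade earlier in this diff wires up its four instances exactly this way:

_lookupThrottler = new LookupThrottler(); // default: 20 lookups per 3 minutes
_lookupBanner = new LookupThrottler(BAN_LOOKUP_BASE, BAN_LOOKUP_BASE_INTERVAL); // 50 per 5 minutes
_lookupThrottlerBurst = new LookupThrottler(DROP_LOOKUP_BURST, DROP_LOOKUP_BURST_INTERVAL); // 10 per 30 seconds
_lookupBannerBurst = new LookupThrottler(BAN_LOOKUP_BURST, BAN_LOOKUP_BURST_INTERVAL); // 10 per 15 seconds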