2005-02-20 jrandom

    * Only build failsafe tunnels if we need them
    * Properly implement the selectNotFailingPeers so that we get a random
      selection of peers, rather than using the strictOrdering (thanks dm!)
    * Don't include too many "don't tell me about" peer references in the
      lookup message - only send the 10 peer references closest to the target.
jrandom
2005-02-20 09:12:43 +00:00
committed by zzz
parent 7d4e093b58
commit cbf6a70a1a
10 changed files with 141 additions and 33 deletions


@ -1,4 +1,11 @@
$Id: history.txt,v 1.147 2005/02/18 10:58:20 jrandom Exp $
$Id: history.txt,v 1.148 2005/02/19 18:20:58 jrandom Exp $
2005-02-20 jrandom
* Only build failsafe tunnels if we need them
* Properly implement the selectNotFailingPeers so that we get a random
selection of peers, rather than using the strictOrdering (thanks dm!)
* Don't include too many "don't tell me about" peer references in the
lookup message - only send the 10 peer references closest to the target.
2005-02-19 jrandom
* Only build new extra tunnels on failure if we don't have enough


@ -243,7 +243,7 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
buf.append("\n\tSearch Key: ").append(getSearchKey());
buf.append("\n\tFrom: ").append(getFrom());
buf.append("\n\tReply Tunnel: ").append(getReplyTunnel());
buf.append("\n\tDont Include Peers: ").append(getDontIncludePeers());
buf.append("\n\tDont Include Peers: ").append(_dontIncludePeers.size());
buf.append("]");
return buf.toString();
}


@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.142 $ $Date: 2005/02/17 17:57:53 $";
public final static String ID = "$Revision: 1.143 $ $Date: 2005/02/19 18:20:57 $";
public final static String VERSION = "0.5";
public final static long BUILD = 1;
public final static long BUILD = 2;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);
System.out.println("Router ID: " + RouterVersion.ID);


@ -71,19 +71,18 @@ class ExploreJob extends SearchJob {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway.getIdentity().getHash());
msg.setDontIncludePeers(getState().getAttempted());
msg.setDontIncludePeers(getState().getClosestAttempted(MAX_CLOSEST));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(replyTunnelId);
Set attempted = getState().getAttempted();
List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), NUM_CLOSEST_TO_IGNORE, attempted, getFacade().getKBuckets());
Set toSkip = new HashSet(64);
toSkip.addAll(attempted);
toSkip.addAll(peers);
msg.setDontIncludePeers(toSkip);
int available = MAX_CLOSEST - msg.getDontIncludePeers().size();
if (available > 0) {
List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), available, msg.getDontIncludePeers(), getFacade().getKBuckets());
msg.getDontIncludePeers().addAll(peers);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peers we don't want to hear about: " + toSkip);
_log.debug("Peers we don't want to hear about: " + msg.getDontIncludePeers());
return msg;
}
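To make the budget above concrete, here is a tiny worked example (the count of 7 already-attempted refs is hypothetical, not from this commit): the lookup only asks selectNearestExplicit for whatever room is left under MAX_CLOSEST, so the "don't tell me about" list never grows past 10 entries.

public class DontIncludeBudgetExample {
    public static void main(String[] args) {
        int maxClosest = 10;       // SearchJob.MAX_CLOSEST above
        int alreadyIncluded = 7;   // hypothetical: refs already returned by getClosestAttempted()
        int available = maxClosest - alreadyIncluded;
        if (available > 0) {
            // only these 3 remaining slots are filled from selectNearestExplicit
            System.out.println("extra nearest refs to add: " + available);
        }
    }
}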


@ -53,6 +53,8 @@ class SearchJob extends JobImpl {
private static final int SEARCH_BREDTH = 3; // 3 peers at a time
private static final int SEARCH_PRIORITY = 400; // large because the search is probably for a real search
/** only send the 10 closest "don't tell me about" refs */
static final int MAX_CLOSEST = 10;
/**
* How long will we give each peer to reply to our search?
@ -371,7 +373,7 @@ class SearchJob extends JobImpl {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(_state.getTarget());
msg.setFrom(replyGateway.getIdentity().getHash());
msg.setDontIncludePeers(_state.getAttempted());
msg.setDontIncludePeers(_state.getClosestAttempted(MAX_CLOSEST));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(replyTunnelId);
return msg;
@ -386,7 +388,7 @@ class SearchJob extends JobImpl {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setSearchKey(_state.getTarget());
msg.setFrom(getContext().routerHash());
msg.setDontIncludePeers(_state.getAttempted());
msg.setDontIncludePeers(_state.getClosestAttempted(MAX_CLOSEST));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(null);
return msg;


@ -6,7 +6,9 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
@ -48,6 +50,25 @@ class SearchState {
return (Set)_attemptedPeers.clone();
}
}
public Set getClosestAttempted(int max) {
synchronized (_attemptedPeers) {
return locked_getClosest(_attemptedPeers, max, _searchKey);
}
}
private Set locked_getClosest(Set peers, int max, Hash target) {
if (_attemptedPeers.size() <= max)
return new HashSet(_attemptedPeers);
TreeSet closest = new TreeSet(new XORComparator(target));
closest.addAll(_attemptedPeers);
HashSet rv = new HashSet(max);
int i = 0;
for (Iterator iter = closest.iterator(); iter.hasNext() && i < max; i++) {
rv.add(iter.next());
}
return rv;
}
public boolean wasAttempted(Hash peer) {
synchronized (_attemptedPeers) {
return _attemptedPeers.contains(peer);


@ -0,0 +1,30 @@
package net.i2p.router.networkdb.kademlia;
import java.util.Comparator;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
/**
* Help sort Hashes in relation to a base key using the XOR metric
*
*/
class XORComparator implements Comparator {
private Hash _base;
/**
* @param target key to compare distances with
*/
public XORComparator(Hash target) {
_base = target;
}
public int compare(Object lhs, Object rhs) {
if (lhs == null) throw new NullPointerException("LHS is null");
if (rhs == null) throw new NullPointerException("RHS is null");
if ( (lhs instanceof Hash) && (rhs instanceof Hash) ) {
byte lhsDelta[] = DataHelper.xor(((Hash)lhs).getData(), _base.getData());
byte rhsDelta[] = DataHelper.xor(((Hash)rhs).getData(), _base.getData());
return DataHelper.compareTo(lhsDelta, rhsDelta);
} else {
throw new ClassCastException(lhs.getClass().getName() + " / " + rhs.getClass().getName());
}
}
}
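A minimal usage sketch (not part of this commit) of the ordering that SearchState.getClosestAttempted relies on above; it assumes net.i2p.data.Hash can be constructed from a 32-byte array, which is not shown in this diff.

package net.i2p.router.networkdb.kademlia;

import java.util.TreeSet;
import net.i2p.data.Hash;

public class XORComparatorDemo {
    public static void main(String[] args) {
        byte target[] = new byte[32];
        byte near[] = new byte[32];
        byte far[] = new byte[32];
        near[31] = 0x01;       // differs from the target only in the last byte
        far[0] = (byte) 0x80;  // differs in the first (most significant) byte
        // the TreeSet iterates in ascending XOR distance from the target
        TreeSet closest = new TreeSet(new XORComparator(new Hash(target)));
        closest.add(new Hash(far));
        closest.add(new Hash(near));
        System.out.println("closest hash: " + closest.first()); // prints the 'near' hash
    }
}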


@ -44,6 +44,8 @@ public class ProfileOrganizer {
private Map _wellIntegratedPeers;
/** H(routerIdentity) to PeerProfile for all peers that are not failing horribly */
private Map _notFailingPeers;
/** H(routerIdentity), containing elements in _notFailingPeers */
private List _notFailingPeersList;
/** H(routerIdentity) to PeerProfile for all peers that ARE failing horribly (but that we haven't dropped reference to yet) */
private Map _failingPeers;
/** who are we? */
@ -91,7 +93,8 @@ public class ProfileOrganizer {
_fastPeers = new HashMap(16);
_highCapacityPeers = new HashMap(16);
_wellIntegratedPeers = new HashMap(16);
_notFailingPeers = new HashMap(16);
_notFailingPeers = new HashMap(64);
_notFailingPeersList = new ArrayList(64);
_failingPeers = new HashMap(16);
_strictCapacityOrder = new TreeSet(_comp);
_thresholdSpeedValue = 0.0d;
@ -285,8 +288,20 @@ public class ProfileOrganizer {
*
*/
public void selectNotFailingPeers(int howMany, Set exclude, Set matches) {
selectNotFailingPeers(howMany, exclude, matches, false);
}
/**
* Return a set of Hashes for peers that are not failing, preferring ones that
* we are already talking with
*
* @param howMany how many peers to find
* @param exclude what peers to skip (may be null)
* @param matches set to store the matches in
* @param onlyNotFailing if true, don't include any high capacity peers
*/
public void selectNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing) {
if (matches.size() < howMany)
selectActiveNotFailingPeers(howMany, exclude, matches);
selectAllNotFailingPeers(howMany, exclude, matches, onlyNotFailing);
return;
}
/**
@ -294,6 +309,7 @@ public class ProfileOrganizer {
* talking with.
*
*/
/*
private void selectActiveNotFailingPeers(int howMany, Set exclude, Set matches) {
if (true) {
selectAllNotFailingPeers(howMany, exclude, matches);
@ -319,30 +335,39 @@ public class ProfileOrganizer {
selectAllNotFailingPeers(howMany, exclude, matches);
return;
}
*/
/**
* Return a set of Hashes for peers that are not failing.
*
*/
private void selectAllNotFailingPeers(int howMany, Set exclude, Set matches) {
private void selectAllNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing) {
if (matches.size() < howMany) {
int orig = matches.size();
int needed = howMany - orig;
int start = 0;
List selected = new ArrayList(needed);
synchronized (_reorganizeLock) {
for (Iterator iter = _strictCapacityOrder.iterator(); selected.size() < needed && iter.hasNext(); ) {
PeerProfile prof = (PeerProfile)iter.next();
if (matches.contains(prof.getPeer()) ||
(exclude != null && exclude.contains(prof.getPeer())) ||
_failingPeers.containsKey(prof.getPeer()) ) {
// we randomize the whole list when rebuilding it, but randomizing
// the entire list on each peer selection is a bit crazy
start = _context.random().nextInt(_notFailingPeersList.size());
for (int i = 0; i < _notFailingPeersList.size() && selected.size() < needed; i++) {
int curIndex = (i+start) % _notFailingPeersList.size();
Hash cur = (Hash)_notFailingPeersList.get(curIndex);
if (matches.contains(cur) ||
(exclude != null && exclude.contains(cur))) {
continue;
} else if (onlyNotFailing && _highCapacityPeers.containsKey(cur)) {
// we don't want the good peers, just random ones
continue;
} else {
if (isOk(prof.getPeer()))
selected.add(prof.getPeer());
if (isOk(cur))
selected.add(cur);
}
}
}
if (_log.shouldLog(Log.INFO))
_log.info("Selecting all not failing found " + (matches.size()-orig) + " new peers: " + selected);
_log.info("Selecting all not failing (strict? " + onlyNotFailing + " start=" + start
+ ") found " + selected.size() + " new peers: " + selected);
matches.addAll(selected);
}
if (matches.size() < howMany) {
@ -408,6 +433,7 @@ public class ProfileOrganizer {
_fastPeers.clear();
_highCapacityPeers.clear();
_notFailingPeers.clear();
_notFailingPeersList.clear();
_wellIntegratedPeers.clear();
for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) {
@ -417,7 +443,9 @@ public class ProfileOrganizer {
locked_unfailAsNecessary();
locked_promoteFastAsNecessary();
Collections.shuffle(_notFailingPeersList, _context.random());
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("Profiles reorganized. averages: [integration: " + _thresholdIntegrationValue
+ ", capacity: " + _thresholdCapacityValue + ", speed: " + _thresholdSpeedValue + "]");
@ -654,12 +682,11 @@ public class ProfileOrganizer {
/** called after locking the reorganizeLock */
private PeerProfile locked_getProfile(Hash peer) {
if (_notFailingPeers.containsKey(peer))
return (PeerProfile)_notFailingPeers.get(peer);
else if (_failingPeers.containsKey(peer))
return (PeerProfile)_failingPeers.get(peer);
else
return null;
PeerProfile cur = (PeerProfile)_notFailingPeers.get(peer);
if (cur != null)
return cur;
cur = (PeerProfile)_failingPeers.get(peer);
return cur;
}
/**
@ -717,6 +744,7 @@ public class ProfileOrganizer {
_highCapacityPeers.remove(profile.getPeer());
_wellIntegratedPeers.remove(profile.getPeer());
_notFailingPeers.remove(profile.getPeer());
_notFailingPeersList.remove(profile.getPeer());
} else {
_failingPeers.remove(profile.getPeer());
_fastPeers.remove(profile.getPeer());
@ -724,6 +752,7 @@ public class ProfileOrganizer {
_wellIntegratedPeers.remove(profile.getPeer());
_notFailingPeers.put(profile.getPeer(), profile);
_notFailingPeersList.add(profile.getPeer());
if (_thresholdCapacityValue <= profile.getCapacityValue()) {
_highCapacityPeers.put(profile.getPeer(), profile);
if (_log.shouldLog(Log.DEBUG))
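For reference, a self-contained sketch (class and method names are illustrative, not from this commit) of the selection pattern introduced above: the candidate list is shuffled once per reorganization, and each call then walks it from a random starting offset with wrap-around, instead of always draining the strict capacity ordering from the top.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class RandomOffsetSelectionSketch {
    /** pick up to 'needed' entries from a pre-shuffled candidate list, starting at a random offset */
    static List select(List candidates, int needed, Random rnd) {
        List selected = new ArrayList(needed);
        if (candidates.isEmpty())
            return selected;
        int start = rnd.nextInt(candidates.size());
        for (int i = 0; i < candidates.size() && selected.size() < needed; i++) {
            Object cur = candidates.get((i + start) % candidates.size());
            // the real code also skips excluded, already-matched, and (optionally) high capacity peers here
            selected.add(cur);
        }
        return selected;
    }

    public static void main(String[] args) {
        List peers = new ArrayList(Arrays.asList(new String[] { "A", "B", "C", "D", "E" }));
        Random rnd = new Random();
        Collections.shuffle(peers, rnd);           // done once per profile reorganization
        System.out.println(select(peers, 3, rnd)); // e.g. [D, E, A]
    }
}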


@ -18,7 +18,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
if (length < 0)
return null;
HashSet matches = new HashSet(length);
ctx.profileOrganizer().selectNotFailingPeers(length, null, matches);
ctx.profileOrganizer().selectNotFailingPeers(length, null, matches, true);
matches.remove(ctx.routerHash());
ArrayList rv = new ArrayList(matches);


@ -330,6 +330,26 @@ public class TunnelPool {
void buildFake() { buildFake(true); }
void buildFake(boolean zeroHop) {
int quantity = _settings.getBackupQuantity() + _settings.getQuantity();
boolean needed = true;
synchronized (_tunnels) {
if (_tunnels.size() > quantity) {
int valid = 0;
for (int i = 0; i < _tunnels.size(); i++) {
TunnelInfo info = (TunnelInfo)_tunnels.get(i);
if (info.getExpiration() > _context.clock().now()) {
valid++;
if (valid >= quantity)
break;
}
}
if (valid >= quantity)
needed = false;
}
}
if (!needed) return;
if (_log.shouldLog(Log.INFO))
_log.info(toString() + ": building a fake tunnel (allow zeroHop? " + zeroHop + ")");
Object tempToken = new Object();
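Finally, a condensed sketch (field and method names are hypothetical stand-ins for TunnelPool/TunnelInfo) of the new guard above: count the tunnels that have not yet expired and skip building a fake/failsafe tunnel once the configured quantity is already covered.

import java.util.ArrayList;
import java.util.List;

public class FailsafeBuildGuard {
    /** expiration times (ms since the epoch) of the pool's current tunnels */
    private final List expirations = new ArrayList();

    /** @return true if fewer than 'quantity' tunnels are still valid at 'now' */
    boolean fakeTunnelNeeded(int quantity, long now) {
        int valid = 0;
        synchronized (expirations) {
            for (int i = 0; i < expirations.size() && valid < quantity; i++) {
                long expiration = ((Long) expirations.get(i)).longValue();
                if (expiration > now)
                    valid++;
            }
        }
        return valid < quantity;
    }
}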