Compare commits

...

3 Commits

11 changed files with 40 additions and 26 deletions

View File

@@ -39,6 +39,7 @@ import net.i2p.data.KeysAndCert;
 import net.i2p.data.Signature;
 import net.i2p.data.SimpleDataStructure;
 import net.i2p.router.Router;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.Clock;
 import net.i2p.util.Log;
 import net.i2p.util.OrderedProperties;
@@ -723,4 +724,9 @@ public class RouterInfo extends DatabaseEntry {
         if (fail)
             System.exit(1);
     }
+
+    public boolean isFloodfill() {
+        String caps = this.getCapabilities();
+        return caps.indexOf(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL) >= 0;
+    }
 }
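For context, the helper added above is a plain substring test on the router's capabilities string. A self-contained sketch of the same logic (the CapCheck class, its String-based signature, and the sample caps strings are illustrative, not part of the patch):

// Minimal sketch of the 'f'-capability test behind RouterInfo.isFloodfill().
public class CapCheck {
    // Same constant value as FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL.
    static final char CAPABILITY_FLOODFILL = 'f';

    static boolean isFloodfill(String caps) {
        // A router advertises floodfill by including 'f' in its caps string.
        return caps != null && caps.indexOf(CAPABILITY_FLOODFILL) >= 0;
    }

    public static void main(String[] args) {
        System.out.println(isFloodfill("XfR")); // true: caps contain 'f'
        System.out.println(isFloodfill("LR"));  // false: no 'f' capability
    }
}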

View File

@@ -35,7 +35,7 @@ import net.i2p.util.SystemVersion;
  */
 public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
     public static final char CAPABILITY_FLOODFILL = 'f';
-    private static final String MINIMUM_SUBDB_PEERS = "router.subDbMinimumPeers";
+    private static final String ALWAYS_CONSIDER_PEER_FLOODFILL = "router.ignoreFloodfillCapability";
     private final Map<Hash, FloodSearchJob> _activeFloodQueries;
     private boolean _floodfillEnabled;
     private final Set<Hash> _verifiesInProgress;
@@ -98,6 +98,10 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         _ffMonitor = new FloodfillMonitorJob(_context, this);
     }
 
+    public boolean ignoreFloodfillCapability() {
+        return _context.getBooleanProperty(ALWAYS_CONSIDER_PEER_FLOODFILL);
+    }
+
     @Override
     public synchronized void startup() {
         boolean isFF;
@@ -435,11 +439,20 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
 
     /**
      *  @param peer may be null, returns false if null
+     *  always returns true if ignoreFloodfillCapability()
      */
-    public static boolean isFloodfill(RouterInfo peer) {
-        if (peer == null) return false;
-        String caps = peer.getCapabilities();
-        return caps.indexOf(CAPABILITY_FLOODFILL) >= 0;
+    public boolean isFloodfill(RouterInfo peer) {
+        if (ignoreFloodfillCapability()) return true;
+        return peer.isFloodfill();
     }
+
+    /**
+     *  @param peer may be null, returns false if null
+     *  always returns false if ignoreFloodfillCapability()
+     */
+    public boolean isNotFloodfill(RouterInfo peer) {
+        if (ignoreFloodfillCapability()) return false;
+        return !peer.isFloodfill();
+    }
 
     public List<RouterInfo> getKnownRouterData() {
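The pair of instance methods above is driven by the boolean router property router.ignoreFloodfillCapability, read through _context.getBooleanProperty(). A minimal sketch of the resulting decision table, with booleans standing in for the RouterContext property lookup and the RouterInfo capability check (the demo class and its signatures are illustrative only; the real methods take a RouterInfo):

// Illustrative decision table for isFloodfill()/isNotFloodfill() under
// router.ignoreFloodfillCapability, per the hunk above.
public class IgnoreFloodfillDemo {
    static boolean isFloodfill(boolean ignoreCapability, boolean peerAdvertisesF) {
        if (ignoreCapability) return true;   // every peer is treated as floodfill
        return peerAdvertisesF;              // otherwise the 'f' capability decides
    }

    static boolean isNotFloodfill(boolean ignoreCapability, boolean peerAdvertisesF) {
        if (ignoreCapability) return false;  // no peer is treated as non-floodfill
        return !peerAdvertisesF;
    }

    public static void main(String[] args) {
        // With router.ignoreFloodfillCapability=true, both checks short-circuit:
        System.out.println(isFloodfill(true, false));    // true
        System.out.println(isNotFloodfill(true, false)); // false
        // With the property unset (default false), the capability decides:
        System.out.println(isFloodfill(false, true));    // true
    }
}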

View File

@@ -377,7 +377,7 @@ class FloodfillPeerSelector extends PeerSelector {
             //if (info == null)
             //    return;
-            if (info != null && FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
+            if (info != null && ((FloodfillNetworkDatabaseFacade) _context.netDb()).isFloodfill(info)) {
                 _floodfillMatches.add(entry);
             } else {
                 // This didn't really work because we stopped filling up when _wanted == _matches,

View File

@@ -39,7 +39,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLookupMessageJob {
     @Override
     protected boolean answerAllQueries() {
         if (!getContext().netDb().floodfillEnabled()) return false;
-        return FloodfillNetworkDatabaseFacade.isFloodfill(getContext().router().getRouterInfo());
+        return ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isFloodfill(getContext().router().getRouterInfo());
     }
 
     /**

View File

@@ -82,7 +82,7 @@ class IterativeLookupJob extends JobImpl {
                 }
                 newPeers++;
             } else if (ri.getPublished() < getContext().clock().now() - 60*60*1000 ||
-                       !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
+                       ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isNotFloodfill(ri)) {
                 // get an updated RI from the (now ff?) peer
                 // Only if original search used expl. tunnels
                 if (_search.getFromHash() == null) {

View File

@@ -600,8 +600,9 @@ public class IterativeSearchJob extends FloodSearchJob {
             return;
         }
         RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
-        if (ri != null && !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
-            if (_log.shouldLog(Log.INFO))
+        if (ri != null && ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isNotFloodfill(ri)) {
+            if (((FloodfillNetworkDatabaseFacade) getContext().netDb()).ignoreFloodfillCapability())
+                if (_log.shouldLog(Log.INFO))
                     _log.info(getJobId() + ": non-ff peer from DSRM " + peer);
             return;
         }

View File

@@ -324,7 +324,7 @@ class SearchJob extends JobImpl {
                 _state.replyTimeout(peer);
             } else {
                 RouterInfo ri = (RouterInfo)ds;
-                if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
+                if (((FloodfillNetworkDatabaseFacade) _facade).isNotFloodfill(ri)) {
                     _floodfillPeersExhausted = true;
                     if (onlyFloodfill)
                         continue;
@@ -481,7 +481,7 @@ class SearchJob extends JobImpl {
         SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade,
                                                                         this, outTunnel, inTunnel);
-        if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
+        if (((FloodfillNetworkDatabaseFacade) _facade).isFloodfill(router))
             _floodfillSearchesOutstanding++;
         getContext().messageRegistry().registerPending(sel, reply, new FailedJob(getContext(), router));
         // TODO pass a priority to the dispatcher
@@ -517,7 +517,7 @@ class SearchJob extends JobImpl {
         SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, to,
                                                           reply, new FailedJob(getContext(), router), sel, timeout,
                                                           OutNetMessage.PRIORITY_EXPLORATORY, _msgIDBloomXor);
-        if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
+        if (((FloodfillNetworkDatabaseFacade) _facade).isFloodfill(router))
             _floodfillSearchesOutstanding++;
         j.runJob();
         //getContext().jobQueue().addJob(j);
@@ -608,7 +608,7 @@ class SearchJob extends JobImpl {
             _penalizePeer = penalizePeer;
             _peer = peer.getIdentity().getHash();
             _sentOn = enclosingContext.clock().now();
-            _isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
+            _isFloodfill = ((FloodfillNetworkDatabaseFacade) _facade).isFloodfill(peer);
         }
         public void runJob() {
             if (_isFloodfill)

View File

@@ -44,7 +44,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
         super(context);
         _log = context.logManager().getLog(SearchUpdateReplyFoundJob.class);
         _peer = peer.getIdentity().getHash();
-        _isFloodfillPeer = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
+        _isFloodfillPeer = ((FloodfillNetworkDatabaseFacade) facade).isFloodfill(peer);
         _state = state;
         _facade = facade;
         _job = job;

View File

@@ -48,7 +48,7 @@ class SingleLookupJob extends JobImpl {
         if (ri == null)
             getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, from));
         else if (ri.getPublished() < getContext().clock().now() - 60*60*1000 ||
-                 !FloodfillNetworkDatabaseFacade.isFloodfill(ri))
+                 ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isNotFloodfill(ri))
             getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, peer));
     }
 }

View File

@@ -121,7 +121,7 @@ class CapacityCalculator {
         if (ndb != null) {
             RouterInfo ri = (RouterInfo) ndb.lookupLocallyWithoutValidation(profile.getPeer());
             if (ri != null) {
-                if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri))
+                if (((FloodfillNetworkDatabaseFacade) ndb).isNotFloodfill(ri))
                     capacity += BONUS_NON_FLOODFILL;
                 String caps = ri.getCapabilities();
                 if (caps.indexOf(Router.CAPABILITY_REACHABLE) < 0)

View File

@@ -130,15 +130,9 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
                     return;
                 if (!ri.isValid())
                     return;
-                RouterInfo oldri = _context.netDb().lookupRouterInfoLocally(key);
-                // only update if RI is newer and non-ff
-                if (oldri != null && oldri.getPublished() < ri.getPublished() &&
-                    !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
-                    if (_log.shouldLog(Log.WARN))
-                        _log.warn("Updating caps for RI " + key + " from \"" +
-                                  oldri.getCapabilities() + "\" to \"" + ri.getCapabilities() + '"');
-                    _context.peerManager().setCapabilities(key, ri.getCapabilities());
-                }
+                // RouterInfo oldri = _context.netDb().lookupRouterInfoLocally(key);
+                // don't update caps if the RouterInfo came down a client tunnel, as it may be
+                // (and almost certainly is) malicious, and we deny RI storage in client DBs anyway
                 return;
             } else if (dsm.getReplyToken() != 0) {
                 _context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, type);