forked from I2P_Developers/i2p.i2p
Compare commits
35 Commits
i2p-2.3.0- ... i2p.i2p.2.
Commit SHA1s:

9a4accaf6b
4d30690ab9
daa2ae1f6a
c646099cda
ce14830743
957bbf8d6a
d06d60f3b6
5b9ddf1c26
a81c43b433
9a5f395704
dd27ece511
72e7747a4a
99dbb1ba47
b14bfb983f
5cd6d1b7ec
ffed1a339d
2068d95862
a817ad8a07
90657340f1
9c3a7940b9
33bafc7cfd
a99be5741c
860455c97b
cb284a5479
57b397def3
5d7dbee39e
a1ea090c00
f3d1cae935
360d85fe95
054efb8642
809315c4d0
d2263492fe
868a1d14e2
ed0a89b422
c4d3673fa1
@@ -90,11 +90,11 @@ public class ConfigKeyringHandler extends FormHandler {
                 return;
             }
             // from BlindCache
-            List<String> clientBase32s = _context.netDbSegmentor().lookupClientBySigningPublicKey(spk);
+            List<Hash> clientBase32s = _context.netDbSegmentor().lookupClientBySigningPublicKey(spk);
             // TODO: This updates all of the blind data for all clients, turning the blind cache into a shared context for the owner of an encrypted leaseSet.
             // This is probably not ideal, with some social-engineering a service operator who owns an encrypted destination could associate 2 tunnels.
             // How realistic is it? Maybe not very, but I don't like it. Still, this is better than nothing.
-            for (String clientBase32 : clientBase32s) {
+            for (Hash clientBase32 : clientBase32s) {
                 BlindData bdold = _context.clientNetDb(clientBase32).getBlindData(spk);
                 if (bdold != null && d == null)
                     d = bdold.getDestination();
@@ -6,11 +6,14 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 
 import net.i2p.crypto.EncType;
 import net.i2p.crypto.SigType;
 import net.i2p.data.DataHelper;
 import net.i2p.data.Hash;
 import net.i2p.data.router.RouterInfo;
 import net.i2p.util.SystemVersion;
 import net.i2p.router.sybil.Analysis;
 import net.i2p.router.web.FormHandler;
@@ -327,7 +330,7 @@ public class NetDbHelper extends FormHandler {
         return getNetDbSummary(null, false);
     }
 
-    public String getNetDbSummary(String client, boolean clientOnly) {
+    public String getNetDbSummary(Hash client, boolean clientOnly) {
         NetDbRenderer renderer = new NetDbRenderer(_context);
         try {
             if (client == null && !clientOnly)
@@ -359,7 +362,7 @@ public class NetDbHelper extends FormHandler {
             } else if (_full == 6) {
                 renderer.renderStatusHTML(_out, _limit, _page, _full, null, true);
             } else if (_clientOnly && client == null) {
-                for (String _client : _context.netDbSegmentor().getClients()) {
+                for (Hash _client : _context.clientManager().getPrimaryHashes()) {
                     renderer.renderLeaseSetHTML(_out, _debug, _client, clientOnly);
                 }
             } else {
@@ -373,7 +376,7 @@ public class NetDbHelper extends FormHandler {
         return "";
     }
 
-    public String getClientNetDbSummary(String client) {
+    public String getClientNetDbSummary(Hash client) {
         return getNetDbSummary(client, true);
     }
 
@@ -428,6 +431,10 @@ public class NetDbHelper extends FormHandler {
                 continue;   // can't nav to lookup
             if (i > 2 && i != tab && !isAdvanced())
                 continue;
+            if (i == 10 || i == 11) {
+                if (_context.netDbSegmentor().getRoutersKnownToClients().size() == 0)
+                    continue;
+            }
             if (i == tab) {
                 // we are there
                 if (span)
@@ -127,15 +127,19 @@ class NetDbRenderer {
                               String country, String family, String caps,
                               String ip, String sybil, int port, int highPort, SigType type, EncType etype,
                               String mtu, String ipv6, String ssucaps,
-                              String tr, int cost, int icount, String client, boolean allClients) throws IOException {
+                              String tr, int cost, int icount, Hash client, boolean allClients) throws IOException {
         StringBuilder buf = new StringBuilder(4*1024);
         List<Hash> sybils = sybil != null ? new ArrayList<Hash>(128) : null;
         FloodfillNetworkDatabaseFacade netdb = _context.netDb();
         if (allClients) {
             netdb = _context.netDb();
         }else{
-            if (client != null)
+            if (client != null) {
+                Log _log = _context.logManager().getLog(NetDbRenderer.class);
+                if (_log.shouldLog(Log.DEBUG))
+                    _log.debug("client subdb for: " + client);
                 netdb = _context.clientNetDb(client);
+            }
             else
                 netdb = _context.netDb();
         }
@@ -608,7 +612,7 @@ class NetDbRenderer {
      * @param debug @since 0.7.14 sort by distance from us, display
      *              median distance, and other stuff, useful when floodfill
      */
-    public void renderLeaseSetHTML(Writer out, boolean debug, String client, boolean clientsOnly) throws IOException {
+    public void renderLeaseSetHTML(Writer out, boolean debug, Hash client, boolean clientsOnly) throws IOException {
         StringBuilder buf = new StringBuilder(4*1024);
         if (debug)
             buf.append("<p id=\"debugmode\">Debug mode - Sorted by hash distance, closest first</p>\n");
@@ -619,8 +623,12 @@ class NetDbRenderer {
         if (clientsOnly){
             netdb = _context.netDb();
         }else{
-            if (client != null)
+            if (client != null) {
+                Log _log = _context.logManager().getLog(NetDbRenderer.class);
+                if (_log.shouldLog(Log.DEBUG))
+                    _log.debug("client subdb for: " + client);
                 netdb = _context.clientNetDb(client);
+            }
             else
                 netdb = _context.netDb();
         }
@@ -635,8 +643,9 @@ class NetDbRenderer {
         }
         if (clientsOnly)
             leases.addAll(_context.netDbSegmentor().getLeasesKnownToClients());
-        else
+        else{
             leases.addAll(netdb.getLeases());
+        }
         int medianCount = 0;
         int rapCount = 0;
         BigInteger median = null;
@@ -951,7 +960,7 @@ class NetDbRenderer {
      * @param mode 0: charts only; 1: full routerinfos; 2: abbreviated routerinfos
      *             mode 3: Same as 0 but sort countries by count
      */
-    public void renderStatusHTML(Writer out, int pageSize, int page, int mode, String client, boolean clientsOnly) throws IOException {
+    public void renderStatusHTML(Writer out, int pageSize, int page, int mode, Hash client, boolean clientsOnly) throws IOException {
         if (!_context.netDb().isInitialized()) {
             out.write("<div id=\"notinitialized\">");
             out.write(_t("Not initialized"));
@@ -632,7 +632,7 @@ public class SummaryHelper extends HelperBase {
             else
                 buf.append(DataHelper.escapeHTML(ServletUtil.truncate(name, 29))).append("…");
             buf.append("</a></b></td>\n");
-            LeaseSet ls = _context.netDbSegmentor().lookupLeaseSetHashIsClient(h);
+            LeaseSet ls = _context.clientNetDb(client.calculateHash()).lookupLeaseSetLocally(h);
             if (ls != null && _context.tunnelManager().getOutboundClientTunnelCount(h) > 0) {
                 if (!ls.isCurrent(0)) {
                     // yellow light
@@ -20,6 +20,7 @@ import net.i2p.data.Hash;
 import net.i2p.data.LeaseSet;
 import net.i2p.data.i2cp.MessageId;
 import net.i2p.data.i2cp.SessionConfig;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 
 /**
  * Manage all interactions with clients
@@ -121,4 +122,30 @@ public abstract class ClientManagerFacade implements Service {
      * @since 0.9.41
      */
     public void unregisterMetaDest(Destination dest) {}
 
+    /**
+     * get the FloodfillNetworkDatabaseFacade associated with a particular client destination.
+     * This is inside the runner, so it won't be there if the runner isn't ready.
+     *
+     * @param destHash destination hash associated with the client whose subDb we're looking for
+     * @return non-null FloodfillNetworkDatabaseFacade
+     * @since 0.9.60
+     */
+    public abstract FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash destHash);
+
+    /**
+     * get all of the FloodfillNetworkDatabaseFacades for all of the clients.
+     *
+     * @return non-null set of FloodfillNetworkDatabaseFacades
+     * @since 0.9.60
+     */
+    public abstract Set<FloodfillNetworkDatabaseFacade> getClientFloodfillNetworkDatabaseFacades();
+
+    /**
+     * get a set of all primary hashes
+     *
+     * @return non-null set of Hashes
+     * @since 0.9.60
+     */
+    public abstract Set<Hash> getPrimaryHashes();
 }
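The three abstract methods above give router components one entry point for reaching per-client netDbs. A minimal sketch of a consumer, assuming only APIs visible in this changeset (the SubDbLister class and dumpClientDbs method are hypothetical, not part of the commit):

    import java.util.Set;

    import net.i2p.data.Hash;
    import net.i2p.router.RouterContext;
    import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;

    // Hypothetical helper: enumerate every client subDb via the new facade methods.
    class SubDbLister {
        private final RouterContext _context;

        SubDbLister(RouterContext context) { _context = context; }

        // For each client primary hash, fetch its per-client netDb (it may be
        // missing if the runner is not ready yet) and report its lease count.
        void dumpClientDbs() {
            for (Hash h : _context.clientManager().getPrimaryHashes()) {
                FloodfillNetworkDatabaseFacade subdb =
                    _context.clientManager().getClientFloodfillNetworkDatabaseFacade(h);
                if (subdb == null)
                    continue; // runner not ready, no subDb yet
                System.out.println(h.toBase32() + ": " + subdb.getLeases().size() + " leases");
            }
        }
    }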
@@ -375,7 +375,6 @@ public class RouterContext extends I2PAppContext {
     public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; }
     public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); }
     public FloodfillNetworkDatabaseFacade multihomeNetDb() { return _netDb.multiHomeNetDB(); }
-    public FloodfillNetworkDatabaseFacade clientNetDb(String id) { return _netDb.clientNetDB(id); }
     public FloodfillNetworkDatabaseFacade clientNetDb(Hash id) { return _netDb.clientNetDB(id); }
     /**
      * The actual driver of the router, where all jobs are enqueued and processed.
@@ -20,7 +20,7 @@ public class RouterVersion {
     public final static String VERSION = CoreVersion.VERSION;
     /** for example: "beta", "alpha", "rc" */
     public final static String STATUS = "";
-    public final static long BUILD = 4;
+    public final static long BUILD = 5;
     /** for example "-test" */
     public final static String EXTRA = "";
     public final static String FULL_VERSION = VERSION + "-" + STATUS + BUILD + EXTRA;
@@ -49,6 +49,8 @@ import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
 import net.i2p.router.crypto.TransientSessionKeyManager;
 import net.i2p.router.crypto.ratchet.RatchetSKM;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;
 import net.i2p.router.crypto.ratchet.MuxedSKM;
 import net.i2p.util.ConcurrentHashSet;
 import net.i2p.util.I2PThread;
@@ -90,6 +92,8 @@ class ClientConnectionRunner {
     protected I2CPMessageReader _reader;
     /** Used for all sessions, which must all have the same crypto keys */
     private SessionKeyManager _sessionKeyManager;
+    /** Used for leaseSets sent to and received from this client */
+    private FloodfillNetworkDatabaseFacade _floodfillNetworkDatabaseFacade;
     /**
      * This contains the last 10 MessageIds that have had their (non-ack) status
      * delivered to the client (so that we can be sure only to update when necessary)
@@ -156,6 +160,8 @@ class ClientConnectionRunner {
         _alreadyProcessed = new ArrayList<MessageId>();
         _acceptedPending = new ConcurrentHashSet<MessageId>();
         _messageId = new AtomicInteger(_context.random().nextInt());
+        // Set up the per-destination FloodfillNetworkDatabaseFacade to prevent clients from being able to
+        // update leaseSet entries in the floodfill netDb
     }
 
     private static final AtomicInteger __id = new AtomicInteger();
@@ -207,23 +213,25 @@ class ClientConnectionRunner {
         _acceptedPending.clear();
         if (_sessionKeyManager != null)
             _sessionKeyManager.shutdown();
+        if (_floodfillNetworkDatabaseFacade != null)
+            if (_floodfillNetworkDatabaseFacade.isClientDb())
+                _floodfillNetworkDatabaseFacade.shutdown();
         if (_encryptedLSHash != null)
             _manager.unregisterEncryptedDestination(this, _encryptedLSHash);
         _manager.unregisterConnection(this);
         // netdb may be null in unit tests
-        Hash dbid = getDestHash();
-        if (_context.netDbSegmentor() != null) {
+        if (_context.netDb() != null) {
             // Note that if the client sent us a destroy message,
             // removeSession() was called just before this, and
             // _sessions will be empty.
             for (SessionParams sp : _sessions.values()) {
                 LeaseSet ls = sp.currentLeaseSet;
-                if (ls != null)
-                    _context.clientNetDb(dbid).unpublish(ls);
+                if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
+                    getFloodfillNetworkDatabaseFacade().unpublish(ls);
                 // unpublish encrypted LS also
                 ls = sp.currentEncryptedLeaseSet;
-                if (ls != null)
-                    _context.clientNetDb(dbid).unpublish(ls);
+                if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
+                    getFloodfillNetworkDatabaseFacade().unpublish(ls);
                 if (!sp.isPrimary)
                     _context.tunnelManager().removeAlias(sp.dest);
             }
@@ -459,12 +467,12 @@ class ClientConnectionRunner {
         // Tell client manager
         _manager.unregisterSession(id, sp.dest);
         LeaseSet ls = sp.currentLeaseSet;
-        if (ls != null)
-            _context.clientNetDb(dbid).unpublish(ls);
+        if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
+            getFloodfillNetworkDatabaseFacade().unpublish(ls);
         // unpublish encrypted LS also
         ls = sp.currentEncryptedLeaseSet;
-        if (ls != null)
-            _context.clientNetDb(dbid).unpublish(ls);
+        if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
+            getFloodfillNetworkDatabaseFacade().unpublish(ls);
         isPrimary = sp.isPrimary;
         if (isPrimary)
             _context.tunnelManager().removeTunnels(sp.dest);
@@ -484,12 +492,12 @@ class ClientConnectionRunner {
             _log.info("Destroying remaining client subsession " + sp.sessionId);
         _manager.unregisterSession(sp.sessionId, sp.dest);
         LeaseSet ls = sp.currentLeaseSet;
-        if (ls != null)
-            _context.clientNetDb(dbid).unpublish(ls);
+        if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
+            getFloodfillNetworkDatabaseFacade().unpublish(ls);
         // unpublish encrypted LS also
         ls = sp.currentEncryptedLeaseSet;
-        if (ls != null)
-            _context.clientNetDb(dbid).unpublish(ls);
+        if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
+            getFloodfillNetworkDatabaseFacade().unpublish(ls);
         _context.tunnelManager().removeAlias(sp.dest);
         synchronized(this) {
             if (sp.rerequestTimer != null)
@@ -564,6 +572,18 @@ class ClientConnectionRunner {
     public int sessionEstablished(SessionConfig config) {
         Destination dest = config.getDestination();
         Hash destHash = dest.calculateHash();
+        if (destHash != null) {
+            if (_log.shouldLog(Log.DEBUG)) {
+                _log.debug("Initializing subDb for client " + destHash);
+            }
+            _floodfillNetworkDatabaseFacade = new FloodfillNetworkDatabaseFacade(_context, destHash);
+            _floodfillNetworkDatabaseFacade.startup();
+        } else {
+            if (_log.shouldLog(Log.ERROR)) {
+                _log.error("Initializing subDb for unknown client " + dest, new Exception());
+            }
+            _floodfillNetworkDatabaseFacade = null;
+        }
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("SessionEstablished called for destination " + destHash);
         if (_sessions.size() > MAX_SESSIONS)
@@ -590,7 +610,6 @@ class ClientConnectionRunner {
         _dontSendMSM = "none".equals(opts.getProperty(I2PClient.PROP_RELIABILITY, "").toLowerCase(Locale.US));
         _dontSendMSMOnReceive = Boolean.parseBoolean(opts.getProperty(I2PClient.PROP_FAST_RECEIVE));
     }
 
-    // Set up the
     // per-destination session key manager to prevent rather easy correlation
     // based on the specified encryption types in the config
@@ -1150,6 +1169,33 @@ class ClientConnectionRunner {
      */
     private final static long REQUEUE_DELAY = 500;
     private static final int MAX_REQUEUE = 60;  // 30 sec.
 
+    /**
+     * Get the FloodfillNetworkDatabaseFacade for this runner. This is the client
+     * netDb if the router is configured to use subDbs, or the main netDb if the
+     * router is configured to use a monolithic netDb.
+     *
+     * If neither a client netDb nor the main netDb is available, it will return null.
+     * This should be impossible.
+     * If you get the `getFloodfillNetworkDatabaseFacade is null for runner` warning,
+     * the main netDb will be returned instead. If the main netDb is null, then null
+     * will be returned.
+     *
+     * @return _floodfillNetworkDatabaseFacade
+     * @since 0.9.60
+     */
+    public FloodfillNetworkDatabaseFacade getFloodfillNetworkDatabaseFacade() {
+        if (!_context.netDbSegmentor().useSubDbs())
+            return _context.netDb();
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("getFloodfillNetworkDatabaseFacade is getting the subDb for dbid: " + this.getDestHash());
+        if (_floodfillNetworkDatabaseFacade == null) {
+            if (_log.shouldLog(Log.ERROR))
+                _log.error("getFloodfillNetworkDatabaseFacade is null for runner", new Exception());
+            return _context.netDb();
+        }
+        return this._floodfillNetworkDatabaseFacade;
+    }
+
     private class MessageDeliveryStatusUpdate extends JobImpl {
         private final SessionId _sessId;
@@ -43,6 +43,8 @@ import net.i2p.router.ClientMessage;
 import net.i2p.router.Job;
 import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;
 import net.i2p.util.ConcurrentHashSet;
 import net.i2p.util.I2PThread;
 import net.i2p.util.Log;
@@ -771,6 +773,51 @@ class ClientManager {
         (new HandleJob(msg)).runJob();
     }
 
+    /**
+     * get the FloodfillNetworkDatabaseFacade associated with a particular client destination.
+     * This is inside the runner, so it won't be there if the runner isn't ready.
+     *
+     * @param destHash destination hash associated with the client whose subDb we're looking for
+     * @return may be null if it does not exist
+     */
+    public FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash destHash) {
+        if (destHash != null) {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Getting subDb for desthash: " + destHash);
+            ClientConnectionRunner runner = getRunner(destHash);
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("ClientManager got a runner in getClientFloodfillNetworkDatabaseFacade for " + destHash);
+            return runner.getFloodfillNetworkDatabaseFacade();
+        }
+        return null;
+    }
+
+    /**
+     * get all of the FloodfillNetworkDatabaseFacades for all of the clients.
+     *
+     * @return non-null
+     */
+    public Set<FloodfillNetworkDatabaseFacade> getClientFloodfillNetworkDatabaseFacades() {
+        Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<FloodfillNetworkDatabaseFacade>();
+        for (ClientConnectionRunner runner : _runners.values()) {
+            if (runner != null)
+                rv.add(runner.getFloodfillNetworkDatabaseFacade());
+        }
+        return rv;
+    }
+
+    /**
+     * get all the primary hashes for all the clients and return them as a set
+     *
+     * @return non-null set of Hashes
+     */
+    public Set<Hash> getPrimaryHashes() {
+        Set<Hash> rv = new HashSet<Hash>();
+        for (ClientConnectionRunner runner : _runners.values())
+            rv.add(runner.getDestHash());
+        return rv;
+    }
+
     private class HandleJob extends JobImpl {
         private final ClientMessage _msg;
@@ -28,6 +28,7 @@ import net.i2p.router.ClientManagerFacade;
 import net.i2p.router.ClientMessage;
 import net.i2p.router.Job;
 import net.i2p.router.RouterContext;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.Log;
 
 /**
@@ -290,4 +291,45 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade implements InternalClientManager {
         if (_manager != null)
             _manager.unregisterMetaDest(dest);
     }
 
+    /**
+     * get the FloodfillNetworkDatabaseFacade associated with a particular client destination.
+     * This is inside the runner, so it won't be there if the runner isn't ready.
+     *
+     * @param destHash destination hash associated with the client whose subDb we're looking for
+     * @return may be null
+     */
+    @Override
+    public FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash destHash) {
+        if (_manager != null)
+            return _manager.getClientFloodfillNetworkDatabaseFacade(destHash);
+        else
+            return null;
+    }
+
+    /**
+     * get all of the FloodfillNetworkDatabaseFacades for all of the clients.
+     *
+     * @return non-null
+     */
+    @Override
+    public Set<FloodfillNetworkDatabaseFacade> getClientFloodfillNetworkDatabaseFacades() {
+        if (_manager != null)
+            return _manager.getClientFloodfillNetworkDatabaseFacades();
+        else
+            return Collections.emptySet();
+    }
+
+    /**
+     * get all the primary hashes for all the clients and return them as a set
+     *
+     * @return non-null
+     */
+    @Override
+    public Set<Hash> getPrimaryHashes() {
+        if (_manager != null)
+            return _manager.getPrimaryHashes();
+        else
+            return Collections.emptySet();
+    }
 }
@@ -711,13 +711,13 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventListener {
             }
             if (_log.shouldDebug())
                 _log.debug("Publishing: " + ls);
-            _context.clientNetDb(_runner.getDestHash()).publish(ls);
+            _runner.getFloodfillNetworkDatabaseFacade().publish(ls);
             if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
                 // store the decrypted ls also
                 EncryptedLeaseSet encls = (EncryptedLeaseSet) ls;
                 if (_log.shouldDebug())
                     _log.debug("Storing decrypted: " + encls.getDecryptedLeaseSet());
-                _context.clientNetDb(dest.getHash()).store(dest.getHash(), encls.getDecryptedLeaseSet());
+                _runner.getFloodfillNetworkDatabaseFacade().store(dest.getHash(), encls.getDecryptedLeaseSet());
             }
         } catch (IllegalArgumentException iae) {
             if (_log.shouldLog(Log.ERROR))
@@ -861,9 +861,9 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventListener {
                 _log.warn("Unsupported BlindingInfo type: " + message);
             return;
         }
-        BlindData obd = _context.clientNetDb(_runner.getDestHash()).getBlindData(spk);
+        BlindData obd = _runner.getFloodfillNetworkDatabaseFacade().getBlindData(spk);
         if (obd == null) {
-            _context.clientNetDb(_runner.getDestHash()).setBlindData(bd);
+            _runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd);
             if (_log.shouldWarn())
                 _log.warn("New: " + bd);
         } else {
@@ -884,7 +884,7 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventListener {
                     return;
                 }
             }
-            _context.clientNetDb(_runner.getDestHash()).setBlindData(bd);
+            _runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd);
             if (_log.shouldWarn())
                 _log.warn("Updated: " + bd);
         } else {
@@ -893,7 +893,7 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventListener {
             if (nexp > oexp) {
                 obd.setExpiration(nexp);
                 // to force save at shutdown
-                _context.clientNetDb(_runner.getDestHash()).setBlindData(obd);
+                _runner.getFloodfillNetworkDatabaseFacade().setBlindData(obd);
                 if (_log.shouldWarn())
                     _log.warn("Updated expiration: " + obd);
             } else {
@@ -91,11 +91,7 @@ class LookupDestJob extends JobImpl {
             try {
                 bd = Blinding.decode(context, b);
                 SigningPublicKey spk = bd.getUnblindedPubKey();
-                BlindData bd2;
-                if (_fromLocalDest == null)
-                    bd2 = getContext().netDb().getBlindData(spk);
-                else
-                    bd2 = getContext().clientNetDb(_fromLocalDest).getBlindData(spk);
+                BlindData bd2 = _runner.getFloodfillNetworkDatabaseFacade().getBlindData(spk);
                 if (bd2 != null) {
                     // BlindData from database may have privkey or secret
                     // check if we need it but don't have it
@@ -114,7 +110,7 @@ class LookupDestJob extends JobImpl {
                     long exp = now + ((bd.getAuthRequired() || bd.getSecretRequired()) ? 365*24*60*60*1000L
                                                                                        : 90*24*60*60*1000L);
                     bd.setExpiration(exp);
-                    getContext().clientNetDb(_fromLocalDest).setBlindData(bd);
+                    _runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd);
                 }
                 h = bd.getBlindedHash();
                 if (_log.shouldDebug())
@@ -189,7 +185,7 @@ class LookupDestJob extends JobImpl {
                 if (timeout > 1500)
                     timeout -= 500;
                 // TODO tell router this is an encrypted lookup, skip 38 or earlier ffs?
-                getContext().clientNetDb(_fromLocalDest).lookupDestination(_hash, done, timeout, _fromLocalDest);
+                _runner.getFloodfillNetworkDatabaseFacade().lookupDestination(_hash, done, timeout, _fromLocalDest);
             } else {
                 // blinding decode fail
                 returnFail(HostReplyMessage.RESULT_DECRYPTION_FAILURE);
@@ -208,10 +204,10 @@ class LookupDestJob extends JobImpl {
         }
         public String getName() { return "LeaseSet Lookup Reply to Client"; }
         public void runJob() {
-            Destination dest = getContext().clientNetDb(_fromLocalDest).lookupDestinationLocally(_hash);
+            Destination dest = _runner.getFloodfillNetworkDatabaseFacade().lookupDestinationLocally(_hash);
             if (dest == null && _blindData != null) {
                 // TODO store and lookup original hash instead
-                LeaseSet ls = getContext().clientNetDb(_fromLocalDest).lookupLeaseSetLocally(_hash);
+                LeaseSet ls = _runner.getFloodfillNetworkDatabaseFacade().lookupLeaseSetLocally(_hash);
                 if (ls != null && ls.getType() == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
                     // already decrypted
                     EncryptedLeaseSet encls = (EncryptedLeaseSet) ls;
@@ -8,6 +8,9 @@ package net.i2p.router.dummy;
  *
  */
 
+import java.util.Collections;
+import java.util.Set;
+
 import net.i2p.crypto.SessionKeyManager;
 import net.i2p.data.Destination;
 import net.i2p.data.Hash;
@@ -18,6 +21,7 @@ import net.i2p.router.ClientManagerFacade;
 import net.i2p.router.ClientMessage;
 import net.i2p.router.Job;
 import net.i2p.router.RouterContext;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 
 /**
  * Manage all interactions with clients
@@ -49,6 +53,14 @@ public class DummyClientManagerFacade extends ClientManagerFacade {
     public SessionKeyManager getClientSessionKeyManager(Hash _dest) { return null; }
 
     public void requestLeaseSet(Hash dest, LeaseSet set) {}
 
+    public FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash dbid) {
+        return null;
+    }
+    public Set<FloodfillNetworkDatabaseFacade> getClientFloodfillNetworkDatabaseFacades() {
+        return Collections.emptySet();
+    }
+    public Set<Hash> getPrimaryHashes() {
+        return Collections.emptySet();
+    }
 }
@@ -23,6 +23,7 @@ import net.i2p.data.router.RouterInfo;
 import net.i2p.router.Job;
 import net.i2p.router.RouterContext;
 import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;
 import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade;
 import net.i2p.router.networkdb.kademlia.SegmentedNetworkDatabaseFacade;
 
@@ -33,16 +34,12 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade {
 
     public DummyNetworkDatabaseFacade(RouterContext ctx) {
         super(ctx);
-        _fndb = new FloodfillNetworkDatabaseFacade(ctx, "dummy");
+        _fndb = new FloodfillNetworkDatabaseFacade(ctx, FloodfillNetworkDatabaseSegmentor.MAIN_DBID);
         _fndb.startup();
         _routers = Collections.synchronizedMap(new HashMap<Hash, RouterInfo>());
         _context = ctx;
     }
 
-    public FloodfillNetworkDatabaseFacade getSubNetDB(String dbid){
-        return null;
-    }
-
     public FloodfillNetworkDatabaseFacade getSubNetDB(Hash dbid){
         return null;
     }
@@ -98,11 +95,6 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade {
         throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetHashIsClient'");
     }
 
-    @Override
-    public LeaseSet lookupLeaseSetLocally(Hash key, String dbid) {
-        throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetLocally'");
-    }
-
     @Override
     public FloodfillNetworkDatabaseFacade mainNetDB() {
         return _fndb;
@@ -113,26 +105,11 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade {
         return _fndb;
     }
 
-    @Override
-    public FloodfillNetworkDatabaseFacade clientNetDB(String id) {
-        return _fndb;
-    }
-
     @Override
     public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
         return _fndb;
     }
 
-    @Override
-    public String getDbidByHash(Hash clientKey) {
-        throw new UnsupportedOperationException("Unimplemented method 'lookupLeaseSetHashIsClient'");
-    }
-
-    @Override
-    public List<String> getClients() {
-        throw new UnsupportedOperationException("Unimplemented method 'getClients'");
-    }
-
     @Override
     public Set<FloodfillNetworkDatabaseFacade> getSubNetDBs(){
         throw new UnsupportedOperationException("Unimplemented method 'getSubNetDBs'");
@@ -147,7 +147,10 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
             // Only send it out if it is in our estimated keyspace.
             // For this, we do NOT use their dontInclude list as it can't be trusted
             // (i.e. it could mess up the closeness calculation)
-            LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
+            LeaseSet possibleMultihomed = null;
+            if (getContext().netDbSegmentor().useSubDbs()) {
+                possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
+            }
             Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
                                                                               CLOSENESS_THRESHOLD, null);
             if (weAreClosest(closestHashes)) {
@@ -162,7 +165,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                     _log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
                 getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
                 sendData(searchKey, ls, fromKey, toTunnel);
-            } else if (possibleMultihomed != null) {
+            } else if (getContext().netDbSegmentor().useSubDbs() && possibleMultihomed != null) {
                 // If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
                 // always receivedAsPublished. No need to decide whether or not to answer the request like above, just
                 // answer it so it doesn't look different from other stores.
@@ -181,8 +184,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                 sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
             }
         } else {
-            LeaseSet possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
-            if (possibleMultihomed != null) {
+            LeaseSet possibleMultihomed = null;
+            if (getContext().netDbSegmentor().useSubDbs()) {
+                possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
+            }
+            if ((getContext().netDbSegmentor().useSubDbs()) && possibleMultihomed != null) {
                 if (possibleMultihomed.getReceivedAsPublished()) {
                     if (_log.shouldLog(Log.INFO))
                         _log.info("We have local LS " + searchKey + " in our multihomes cache meaning it was stored to us. Answering query with the stored LS.");
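The same pattern appears in both branches of this hunk: the multihome cache is consulted only when netDb isolation is on. A condensed sketch of that gating pattern, using only calls that appear in this changeset:

    // Gate every multihome-cache read behind the isolation switch so a
    // monolithic-netDb router never answers from the multihome subDb.
    LeaseSet possibleMultihomed = null;
    if (getContext().netDbSegmentor().useSubDbs())
        possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
    if (possibleMultihomed != null && possibleMultihomed.getReceivedAsPublished())
        sendData(searchKey, possibleMultihomed, fromKey, toTunnel); // answer like any other store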
@@ -66,10 +66,13 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
     private static final long NEXT_RKEY_LS_ADVANCE_TIME = 10*60*1000;
     private static final int NEXT_FLOOD_QTY = 2;
 
-    public FloodfillNetworkDatabaseFacade(RouterContext context, String dbid) {
+    public FloodfillNetworkDatabaseFacade(RouterContext context) {
+        this(context, FloodfillNetworkDatabaseSegmentor.MAIN_DBID);
+    }
+    public FloodfillNetworkDatabaseFacade(RouterContext context, Hash dbid) {
         super(context, dbid);
         _activeFloodQueries = new HashMap<Hash, FloodSearchJob>();
         _verifiesInProgress = new ConcurrentHashSet<Hash>(8);
 
         long[] rate = new long[] { 60*60*1000L };
         _context.statManager().createRequiredRateStat("netDb.successTime", "Time for successful lookup (ms)", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
@@ -123,7 +126,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
     @Override
     protected void createHandlers() {
         // Only initialize the handlers for the floodfill netDb.
-        if (super._dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID)) {
+        if (!isClientDb()) {
             if (_log.shouldInfo())
                 _log.info("[dbid: " + super._dbid + "] Initializing the message handlers");
             _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context, this));
@@ -1,25 +1,16 @@
 package net.i2p.router.networkdb.kademlia;
 
-import java.io.IOException;
-import java.io.Writer;
-//import java.rmi.dgc.Lease;
 import java.util.ArrayList;
-import java.util.HashMap;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import net.i2p.data.BlindData;
-import net.i2p.data.DatabaseEntry;
-import net.i2p.data.Destination;
 import net.i2p.data.Hash;
 import net.i2p.data.LeaseSet;
 import net.i2p.data.SigningPublicKey;
-import net.i2p.data.TunnelId;
 import net.i2p.data.router.RouterInfo;
-import net.i2p.router.Job;
 import net.i2p.router.RouterContext;
-import net.i2p.router.networkdb.reseed.ReseedChecker;
 import net.i2p.util.Log;
 
 /**
@@ -61,13 +52,11 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
     protected final Log _log;
     private RouterContext _context;
-    private Map<String, FloodfillNetworkDatabaseFacade> _subDBs = new HashMap<String, FloodfillNetworkDatabaseFacade>();
-    public static final String MAIN_DBID = "main";
-    public static final String MULTIHOME_DBID = "clients_multihome";
-    private static final String EXPLORATORY_DBID = "clients_exploratory";
+    private static final String PROP_NETDB_ISOLATION = "router.netdb.isolation";
+    public static final Hash MAIN_DBID = null;
+    public static final Hash MULTIHOME_DBID = Hash.FAKE_HASH;
     private final FloodfillNetworkDatabaseFacade _mainDbid;
     private final FloodfillNetworkDatabaseFacade _multihomeDbid;
-    private final FloodfillNetworkDatabaseFacade _exploratoryDbid;
 
     /**
      * Construct a new FloodfillNetworkDatabaseSegmentor with the given
@@ -83,7 +72,10 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         _context = context;
         _mainDbid = new FloodfillNetworkDatabaseFacade(_context, MAIN_DBID);
         _multihomeDbid = new FloodfillNetworkDatabaseFacade(_context, MULTIHOME_DBID);
-        _exploratoryDbid = new FloodfillNetworkDatabaseFacade(_context, EXPLORATORY_DBID);
     }
 
+    public boolean useSubDbs() {
+        return _context.getProperty(PROP_NETDB_ISOLATION, true);
+    }
+
     /**
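useSubDbs() turns the whole segmentation scheme on or off from a single router property, default true. A sketch of what that implies for callers (destHash is an illustrative placeholder; the router.config line assumes I2P's standard property-override mechanism):

    // router.config:  router.netdb.isolation=false   disables subDbs entirely.
    boolean isolated = _context.getProperty("router.netdb.isolation", true);
    FloodfillNetworkDatabaseFacade db = isolated
            ? _context.clientNetDb(destHash)   // per-client subDb (may fall back internally)
            : _context.netDb();                // single monolithic netDb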
@@ -91,79 +83,55 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * If the ID is null, the main database is returned.
      *
      * @param id the ID of the FloodfillNetworkDatabaseFacade object to retrieve
-     * @return the FloodfillNetworkDatabaseFacade object corresponding to the ID
+     * @return the FloodfillNetworkDatabaseFacade object corresponding to the ID or null if it does not exist.
      */
     @Override
     protected FloodfillNetworkDatabaseFacade getSubNetDB(Hash id) {
-        if (id == null)
-            return getSubNetDB(MAIN_DBID);
-        return getSubNetDB(id.toBase32());
+        if (!useSubDbs())
+            return _mainDbid;
+        return _context.clientManager().getClientFloodfillNetworkDatabaseFacade(id);
     }
 
-    /**
-     * Retrieves the FloodfillNetworkDatabaseFacade object for the specified ID string.
-     *
-     * @param id the ID of the FloodfillNetworkDatabaseFacade object to retrieve
-     * @return the FloodfillNetworkDatabaseFacade object for the specified ID
-     *
-     */
-    @Override
-    protected FloodfillNetworkDatabaseFacade getSubNetDB(String id) {
-        if (id == null || id.isEmpty() || id.equals(MAIN_DBID))
-            return mainNetDB();
-        if (id.equals(MULTIHOME_DBID))
-            return multiHomeNetDB();
-        if (id.equals(EXPLORATORY_DBID))
-            return clientNetDB();
-
-        if (id.endsWith(".i2p")) {
-            if (!id.startsWith("clients_"))
-                id = "clients_" + id;
-        }
-
-        FloodfillNetworkDatabaseFacade subdb = _subDBs.get(id);
-        if (subdb == null) {
-            subdb = new FloodfillNetworkDatabaseFacade(_context, id);
-            _subDBs.put(id, subdb);
-            subdb.startup();
-            subdb.createHandlers();
-        }
-        return subdb;
-    }
-
     /**
-     * If we are floodfill, turn it off and tell everybody.
-     * Shut down all known subDbs.
+     * If we are floodfill, turn it off and tell everybody for the _mainDbid and the
+     * _multihomeDbid
      *
      * @since 0.9.60
     *
      */
     public synchronized void shutdown() {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("shutdown called from FNDS, shutting down main and multihome db");
         _mainDbid.shutdown();
-        _multihomeDbid.shutdown();
-        // shut down every entry in _subDBs
-        for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("(dbid: " + subdb._dbid
-                           + ") Shutting down all remaining sub-netDbs",
-                           new Exception());
-            subdb.shutdown();
-        }
+        if (useSubDbs())
+            _multihomeDbid.shutdown();
     }
 
     /**
      * Start up the floodfill for the _mainDbid and the _multihomeDbid
      *
      * @since 0.9.60
     *
      */
     public synchronized void startup() {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("startup called from FNDS, starting up main and multihome db");
         _mainDbid.startup();
+        if (useSubDbs())
            _multihomeDbid.startup();
     }
 
     /**
      * list of the RouterInfo objects for all known peers;
      *
      * @since 0.9.60
     *
+     * @return non-null
      */
     public List<RouterInfo> getKnownRouterData() {
         List<RouterInfo> rv = new ArrayList<RouterInfo>();
         for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
             if (_log.shouldLog(Log.DEBUG))
-                _log.debug("(dbid: " + subdb._dbid
-                           + ") Called from FNDS, will be combined with all other subDbs",
-                           new Exception());
+                _log.debug("getKnownRouterData Called from FNDS," + subdb._dbid + ", will be combined with all other subDbs");
             rv.addAll(subdb.getKnownRouterData());
         }
         return rv;
@@ -175,14 +143,13 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * List is not sorted and not shuffled.
      *
      * @since 0.9.60
+     * @return non-null
      */
     public List<Hash> getFloodfillPeers() {
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("getFloodfillPeers collecting all floodfill peers across all subDbs");
         List<Hash> peers = new ArrayList<Hash>();
         for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("(dbid: " + subdb._dbid
-                           + ") Deprecated! Arbitrary selection of this subDb",
-                           new Exception());
             peers.addAll(subdb.getFloodfillPeers());
         }
         return peers;
@@ -193,12 +160,12 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * but the client dbid is not.
      *
      * @param key The LS key for client.
+     * @return may be null
      * @since 0.9.60
      */
     @Override
     public LeaseSet lookupLeaseSetHashIsClient(Hash key) {
-        String dbid = matchDbid(key);
-        return lookupLeaseSetLocally(key, dbid);
+        return lookupLeaseSetLocally(key, null);
     }
 
     /**
@@ -206,27 +173,21 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * if a DBID is not provided, the clients will all be checked, and the
     * first value will be used.
      *
+     * @return may be null
      * @since 0.9.60
     *
      */
-    @Override
-    protected LeaseSet lookupLeaseSetLocally(Hash key, String dbid) {
-        if (dbid == null || dbid.isEmpty()) {
+    //@Override
+    protected LeaseSet lookupLeaseSetLocally(Hash key, Hash dbid) {
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("lookupLeaseSetLocally on all subDbs: " + key.toBase32());
+        if (dbid == null) {
             LeaseSet rv = null;
-            for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
-                if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("(dbid: " + subdb._dbid
-                               + ") Deprecated! Arbitrary selection of this subDb",
-                               new Exception());
+            for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) {
                 rv = subdb.lookupLeaseSetLocally(key);
                 if (rv != null) {
                     return rv;
                 }
             }
+            rv = this.lookupLeaseSetLocally(key, MAIN_DBID);
+            if (rv != null) {
+                return rv;
+            }
         }
         return this.getSubNetDB(dbid).lookupLeaseSetLocally(key);
     }
@@ -235,11 +196,15 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * Check if all of the known subDbs are initialized
      *
      * @since 0.9.60
     *
+     * @return true if the mainNetdb and all known client netDbs are initialized
      */
     public boolean isInitialized() {
-        boolean rv = mainNetDB().isInitialized();
-        for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
+        if (_mainDbid == null)
+            return false;
+        boolean rv = _mainDbid.isInitialized();
+        if (!rv)
+            return rv;
+        for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) {
             rv = subdb.isInitialized();
             if (!rv) {
                 break;
@@ -248,37 +213,18 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         return rv;
     }
 
-    /**
-     * list of the RouterInfo objects for all known peers
-     *
-     * @since 0.9.60
-     *
-     */
-    @Override
-    public Set<RouterInfo> getRouters() {
-        Set<RouterInfo> rv = new HashSet<>();
-        for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("(dbid: " + subdb._dbid
-                           + ") Deprecated! Arbitrary selection of this subDb",
-                           new Exception());
-            rv.addAll(subdb.getRouters());
-        }
-        return rv;
-    }
-
     /**
      * list of the RouterInfo objects for all known peers known to clients(in subDbs) only
      *
      * @since 0.9.60
     *
+     * @return non-null
      */
     public Set<RouterInfo> getRoutersKnownToClients() {
         Set<RouterInfo> rv = new HashSet<>();
-        for (String key : getClients()) {
-            rv.addAll(this.getSubNetDB(key).getRouters());
+        for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) {
+            Set<RouterInfo> rv2 = subdb.getRouters();
+            if (rv2 != null)
+                rv.addAll(rv2);
         }
         return rv;
     }
@@ -287,29 +233,14 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * list of the LeaseSet objects for all known peers known to clients(in subDbs) only
      *
      * @since 0.9.60
     *
+     * @return non-null
      */
     public Set<LeaseSet> getLeasesKnownToClients() {
         Set<LeaseSet> rv = new HashSet<>();
-        for (String key : getClients()) {
-            rv.addAll(this.getSubNetDB(key).getLeases());
-        }
-        return rv;
-    }
-
-    /**
-     * list all of the dbids of all known client subDbs
-     *
-     * @since 0.9.60
-     *
-     */
-    public List<String> getClients() {
-        List<String> rv = new ArrayList<String>();
-        for (String key : _subDBs.keySet()) {
-            if (key != null && !key.isEmpty()) {
-                if (key.startsWith("client"))
-                    rv.add(key);
-            }
+        for (FloodfillNetworkDatabaseFacade fndf : getClientSubNetDBs()) {
+            Set<LeaseSet> rv2 = fndf.getLeases();
+            if (rv2 != null)
+                rv.addAll(rv2);
         }
         return rv;
     }
@@ -318,7 +249,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * get the main netDb, which is the one we will use if we are a floodfill
      *
      * @since 0.9.60
     *
+     * @return may be null
      */
     @Override
     public FloodfillNetworkDatabaseFacade mainNetDB() {
@@ -329,69 +260,55 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * get the multiHome netDb, which is especially for handling multihomes
      *
      * @since 0.9.60
     *
+     * @return may be null
      */
     @Override
     public FloodfillNetworkDatabaseFacade multiHomeNetDB() {
         return _multihomeDbid;
     }
 
-    /**
-     * get the client netDb for the given id.
-     * Will return the "exploratory(default client)" netDb if
-     * the dbid is null.
-     *
-     * @since 0.9.60
-     *
-     */
-    @Override
-    public FloodfillNetworkDatabaseFacade clientNetDB(String id) {
-        if (id == null || id.isEmpty())
-            return clientNetDB();
-        return this.getSubNetDB(id);
-    }
-
     /**
      * get the client netDb for the given id
-     * Will return the "exploratory(default client)" netDb if
-     * the dbid is null.
      *
      * @since 0.9.60
     *
+     * @return may be null if the client netDb does not exist
      */
     @Override
     public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
-        if (id != null)
-            return getSubNetDB(id.toBase32());
-        return clientNetDB();
+        if (_log.shouldDebug())
+            _log.debug("looked up clientNetDB: " + id);
+        if (!useSubDbs())
+            return _mainDbid;
+        if (id != null) {
+            FloodfillNetworkDatabaseFacade fndf = getSubNetDB(id);
+            if (fndf != null)
+                return fndf;
+        }
+        return mainNetDB();
     }
 
     /**
-     * get the default client(exploratory) netDb
+     * look up the dbid of the client or clients with the given signing
+     * public key
      *
      * @since 0.9.60
     *
-     */
-    public FloodfillNetworkDatabaseFacade clientNetDB() {
-        return _exploratoryDbid;
-    }
-
-    /**
-     * look up the dbid of the client with the given signing public key
-     *
-     * @since 0.9.60
-     *
+     * @return non-null
      */
     @Override
-    public List<String> lookupClientBySigningPublicKey(SigningPublicKey spk) {
-        List<String> rv = new ArrayList<>();
-        for (String subdb : getClients()) {
+    public List<Hash> lookupClientBySigningPublicKey(SigningPublicKey spk) {
+        List<Hash> rv = new ArrayList<>();
+        for (Hash subdb : _context.clientManager().getPrimaryHashes()) {
+            FloodfillNetworkDatabaseFacade fndf = _context.clientManager().getClientFloodfillNetworkDatabaseFacade(subdb);
+            if (fndf == null)
+                continue;
-            // if (subdb.startsWith("clients_"))
             // TODO: see if we can access only one subDb at a time when we need
             // to look up a client by SPK. We mostly need this for managing blinded
             // and encrypted keys in the Keyring Config UI page. See also
             // ConfigKeyringHelper
-            BlindData bd = _subDBs.get(subdb).getBlindData(spk);
+            BlindData bd = fndf.getBlindData(spk);
             if (bd != null) {
                 rv.add(subdb);
             }
@@ -400,44 +317,44 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
     }
 
-    /**
-     * Public helper to return the dbid that is associated with the
-     * supplied client key.
-     *
-     * @param clientKey The LS key of the subDb context
-     * @since 0.9.60
-     */
-    @Override
-    public String getDbidByHash(Hash clientKey) {
-        return matchDbid(clientKey);
-    }
-
-    /**
-     * Return the dbid that is associated with the supplied client LS key
-     *
-     * @param clientKey The LS key of the subDb context
-     * @since 0.9.60
-     */
-    private String matchDbid(Hash clientKey) {
-        for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
-            if (subdb.matchClientKey(clientKey))
-                return subdb._dbid;
-        }
-        return null;
-    }
-
     /**
-     * get all the subDbs and return them in a Set.
+     * get all the subDbs and return them in a Set. This includes the main netDb
+     * and the possible-multihomes netDb
      *
      * @since 0.9.60
     *
+     * @return non-null
      */
     @Override
     public Set<FloodfillNetworkDatabaseFacade> getSubNetDBs() {
+        if (!_mainDbid.isInitialized())
+            return Collections.emptySet();
         Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
-        rv.add(mainNetDB());
+        if (!useSubDbs()) {
+            rv.add(_mainDbid);
+            return rv;
+        }
+        rv.add(_mainDbid);
         rv.add(multiHomeNetDB());
-        rv.add(clientNetDB());
-        rv.addAll(_subDBs.values());
+        rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
         return rv;
     }
 
+    /**
+     * get all the subDbs and return them in a Set. This only includes subDbs associated
+     * with specific clients, unless subDbs are disabled in which case it only contains the
+     * main netDB
+     *
+     * @since 0.9.60
+     * @return non-null
+     */
+    private Set<FloodfillNetworkDatabaseFacade> getClientSubNetDBs() {
+        if (!_mainDbid.isInitialized())
+            return Collections.emptySet();
+        Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
+        if (!useSubDbs()) {
+            rv.add(_mainDbid);
+            return rv;
+        }
+        rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
+        return rv;
+    }
+
@@ -445,12 +362,12 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      * list of the BlindData objects for all known clients
     *
      * @since 0.9.60
     *
+     * @return non-null
      */
     @Override
     public List<BlindData> getLocalClientsBlindData() {
         List<BlindData> rv = new ArrayList<>();
-        for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
+        for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) {
            rv.addAll(subdb.getBlindData());
         }
         return rv;
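With the _subDBs map gone, every aggregate view is recomputed from the client manager on demand. A small sketch of the union pattern these methods now share, counting known routers across all subDbs (variable names are illustrative, not from the commit):

    Set<FloodfillNetworkDatabaseFacade> dbs = new HashSet<FloodfillNetworkDatabaseFacade>();
    dbs.add(_context.netDb());   // the main netDb
    dbs.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
    int known = 0;
    for (FloodfillNetworkDatabaseFacade db : dbs)
        known += db.getRouters().size();   // same fan-out getSubNetDBs() performs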
|
@ -460,7 +460,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
|
||||
private void resend() {
|
||||
// It's safe to check the default netDb first, but if the lookup is for
|
||||
// a client, nearly all RI is expected to be found in the FF netDb.
|
||||
DatabaseEntry ds = getContext().netDbSegmentor().getSubNetDB(_facade._dbid).lookupLocally(_key);
|
||||
DatabaseEntry ds = _facade.lookupLocally(_key);
|
||||
if ((ds == null) && _facade.isClientDb() && _isRouterInfo)
|
||||
// It's safe to check the floodfill netDb for RI
|
||||
ds = getContext().netDb().lookupLocally(_key);
|
||||
|
@@ -134,12 +134,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
             if (!ls.getReceivedAsReply())
                 ls.setReceivedAsPublished();
             if (_facade.isClientDb())
-                if (_facade.matchClientKey(key))
-                    // In the client subDb context, the only local key to worry about
-                    // is the key for this client.
-                    blockStore = true;
-                else
-                    blockStore = false;
+                blockStore = false;
             else if (getContext().clientManager().isLocal(key))
                 // Non-client context
                 if (_facade.floodfillEnabled() && (_fromHash != null))
@@ -155,7 +150,8 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
                 getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
                 // throw rather than return, so that we send the ack below (prevent easy attack)
                 dontBlamePeer = true;
-                getContext().multihomeNetDb().store(key, ls);
+                if (getContext().netDbSegmentor().useSubDbs())
+                    getContext().multihomeNetDb().store(key, ls);
                 throw new IllegalArgumentException("(dbid: " + _facade._dbid
                                                    + ") Peer attempted to store local leaseSet: "
                                                    + key.toBase32());
@ -80,7 +80,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
|
||||
private NegativeLookupCache _negativeCache;
|
||||
protected final int _networkID;
|
||||
private final BlindCache _blindCache;
|
||||
protected final String _dbid;
|
||||
protected final Hash _dbid;
|
||||
private Hash _localKey;
|
||||
|
||||
/**
|
||||
@ -172,7 +172,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
|
||||
private static final int BUCKET_SIZE = 24;
|
||||
private static final int KAD_B = 4;
|
||||
|
||||
public KademliaNetworkDatabaseFacade(RouterContext context, String dbid) {
|
||||
public KademliaNetworkDatabaseFacade(RouterContext context, Hash dbid) {
|
||||
_context = context;
|
||||
_dbid = dbid;
|
||||
_log = _context.logManager().getLog(getClass());
|
||||
@ -297,8 +297,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
|
||||
String getDbDir() {
|
||||
if (_dbDir == null) {
|
||||
String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
|
||||
if (!_dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID) && _dbid != null) {
|
||||
File subDir = new File(dbDir, _dbid);
|
||||
if (_dbid != FloodfillNetworkDatabaseSegmentor.MAIN_DBID) {
|
||||
File subDir = new File(dbDir, _dbid.toBase32());
|
||||
dbDir = subDir.toString();
|
||||
}
|
||||
return dbDir;
|
||||
@ -306,12 +306,37 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
|
||||
return _dbDir;
|
||||
}
|
||||
|
||||
/**
* Check if the database is a client DB.
*
* @return true if the database is a client DB, false otherwise
* @since 0.9.60
*/
public boolean isClientDb() {
return _dbid.startsWith("clients_");
// This is a null check in disguise, don't use .equals() here.
// FNDS.MAIN_DBID is always null, and if _dbid is also null it is not a client Db
if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID)
return false;
if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID))
return false;
return true;
}

/**
* Checks if the current database is a multihome database.
*
* @return true if the current database is a multihome database, false otherwise.
* @since 0.9.60
*/
public boolean isMultihomeDb() {
return _dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID);
// This is a null check in disguise, don't use .equals() here.
// FNDS.MAIN_DBID is always null, and if _dbid is null it is not the multihome Db
if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID)
return false;
if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID))
return true;
return false;
}

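The comments above call the == comparison a null check in disguise: MAIN_DBID is a null reference, so .equals() would throw for the main netDb. A self-contained sketch of the same predicate, using String stand-ins for the Hash constants (illustrative only, not the router classes):

    // Sketch: the identity comparison doubles as the null check for the main netDb.
    public class DbidCheckSketch {
        static final String MAIN_DBID = null;             // main netDb has no dbid
        static final String MULTIHOME_DBID = "multihome"; // placeholder constant

        static boolean isClientDb(String dbid) {
            if (dbid == MAIN_DBID)               // == here: .equals() would NPE
                return false;
            return !dbid.equals(MULTIHOME_DBID); // safe: dbid is non-null here
        }

        public static void main(String[] args) {
            System.out.println(isClientDb(null));         // false (main netDb)
            System.out.println(isClientDb("multihome"));  // false (multihome db)
            System.out.println(isClientDb("exampleb32")); // true (client subDb)
        }
    }
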
public synchronized void startup() {
@ -357,27 +382,29 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
}

if (!QUIET) {
// fill the search queue with random keys in buckets that are too small
// Disabled since KBucketImpl.generateRandomKey() is b0rked,
// and anyway, we want to search for a completely random key,
// not a random key for a particular kbucket.
// _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this));
if (_exploreJob == null)
_exploreJob = new StartExplorersJob(_context, this);
// fire off a group of searches from the explore pool
// Don't start it right away, so we don't send searches for random keys
// out our 0-hop exploratory tunnels (generating direct connections to
// one or more floodfill peers within seconds of startup).
// We're trying to minimize the ff connections to lessen the load on the
// floodfills, and in any case let's try to build some real expl. tunnels first.
// No rush, it only runs every 30m.
_exploreJob.getTiming().setStartAfter(now + EXPLORE_JOB_DELAY);
_context.jobQueue().addJob(_exploreJob);
if (!isClientDb() && !isMultihomeDb()) {
// fill the search queue with random keys in buckets that are too small
// Disabled since KBucketImpl.generateRandomKey() is b0rked,
// and anyway, we want to search for a completely random key,
// not a random key for a particular kbucket.
// _context.jobQueue().addJob(new ExploreKeySelectorJob(_context, this));
if (_exploreJob == null)
_exploreJob = new StartExplorersJob(_context, this);
// fire off a group of searches from the explore pool
// Don't start it right away, so we don't send searches for random keys
// out our 0-hop exploratory tunnels (generating direct connections to
// one or more floodfill peers within seconds of startup).
// We're trying to minimize the ff connections to lessen the load on the
// floodfills, and in any case let's try to build some real expl. tunnels first.
// No rush, it only runs every 30m.
_exploreJob.getTiming().setStartAfter(now + EXPLORE_JOB_DELAY);
_context.jobQueue().addJob(_exploreJob);
}
} else {
_log.warn("Operating in quiet mode - not exploring or pushing data proactively, simply reactively");
_log.warn("This should NOT be used in production");
}
if (_dbid == null || _dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID) || _dbid.isEmpty()) {
if (!isClientDb() && !isMultihomeDb()) {
// periodically update and resign the router's 'published date', which basically
// serves as a version
Job plrij = new PublishLocalRouterInfoJob(_context);
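The comments explain why exploration is deferred at startup. A rough sketch of that scheduling idea, with a plain java.util.Timer standing in for the router's job queue; the delay value here is a demo assumption, not the router's EXPLORE_JOB_DELAY:

    import java.util.Timer;
    import java.util.TimerTask;

    // Sketch: don't explore the moment the netDb starts; push the first run
    // out by a delay so real exploratory tunnels can be built first.
    public class ExploreDelaySketch {
        static final long EXPLORE_JOB_DELAY = 1000; // demo value, not the router's

        public static void main(String[] args) {
            final Timer timer = new Timer(); // non-daemon so the JVM waits for the task
            timer.schedule(new TimerTask() {
                @Override public void run() {
                    System.out.println("start explorers"); // stand-in for StartExplorersJob
                    timer.cancel();
                }
            }, EXPLORE_JOB_DELAY);
        }
    }
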
@ -821,18 +848,24 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
_log.error("locally published leaseSet is not valid?", iae);
throw iae;
}
if (_localKey != null) {
if (!_localKey.equals(localLeaseSet.getHash()))
if (_log.shouldLog(Log.ERROR))
_log.error("Error, the local LS hash ("
+ _localKey + ") does not match the published hash ("
+ localLeaseSet.getHash() + ")! This shouldn't happen!",
new Exception());
} else {
// This will only happen once when the local LS is first published
_localKey = localLeaseSet.getHash();
if (_log.shouldLog(Log.INFO))
_log.info("Local client LS key initialized to: " + _localKey);
if (!_context.netDbSegmentor().useSubDbs()){
String dbid = "main netDb";
if (isClientDb()) {
dbid = "client netDb: " + _dbid;
}
if (_localKey != null) {
if (!_localKey.equals(localLeaseSet.getHash()))
if (_log.shouldLog(Log.ERROR))
_log.error("[" + dbid + "]" + "Error, the local LS hash ("
+ _localKey + ") does not match the published hash ("
+ localLeaseSet.getHash() + ")! This shouldn't happen!",
new Exception());
} else {
// This will only happen once when the local LS is first published
_localKey = localLeaseSet.getHash();
if (_log.shouldLog(Log.INFO))
_log.info("[" + dbid + "]" + "Local client LS key initialized to: " + _localKey);
}
}
if (!_context.clientManager().shouldPublishLeaseSet(h))
return;
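A compact sketch of the invariant this hunk enforces, including the new dbid label on the log lines; types are simplified stand-ins:

    // Sketch: once a facade has seen its local leaseSet hash, later publishes
    // must match it; the very first publish just records the key.
    public class LocalKeySketch {
        private String localKey; // stand-in for Hash _localKey
        private final String dbid;

        LocalKeySketch(String dbid) { this.dbid = dbid; }

        void onPublish(String publishedHash) {
            if (localKey != null) {
                if (!localKey.equals(publishedHash))
                    System.err.println("[" + dbid + "] local LS hash " + localKey
                            + " does not match published hash " + publishedHash);
            } else {
                // happens once, when the local LS is first published
                localKey = publishedHash;
            }
        }

        public static void main(String[] args) {
            LocalKeySketch db = new LocalKeySketch("client netDb: exampleb32");
            db.onPublish("hashA"); // initializes the local key
            db.onPublish("hashA"); // ok, matches
            db.onPublish("hashB"); // logs the mismatch
        }
    }
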
@ -1040,31 +1073,18 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
if (rv != null && rv.getEarliestLeaseDate() >= leaseSet.getEarliestLeaseDate()) {
if (_log.shouldDebug())
_log.debug("Not storing older " + key);
// TODO: Determine if this deep equals is actually truly necessary as part of this test or if the date is actually enough
if (rv.equals(leaseSet)) {
if (_log.shouldDebug())
_log.debug("Updating leaseSet found in Datastore " + key);
/** - DatabaseEntry.java note
* we used to just copy the flags here, but there were concerns about crafted
* entries being used to "follow" a leaseSet from one context to another,
* i.e. sent to a client vs. sent to a router. Copying the entire leaseSet,
* flags and all, limits the attacker's ability to craft leaseSet entries
* maliciously.
*/
_ds.put(key, leaseSet);
rv = (LeaseSet)_ds.get(key);
Hash to = leaseSet.getReceivedBy();
if (to != null) {
rv.setReceivedBy(to);
} else if (leaseSet.getReceivedAsReply()) {
rv.setReceivedAsReply();
}
if (leaseSet.getReceivedAsPublished()) {
rv.setReceivedAsPublished();
}
return rv;
} // TODO: Is there any reason to do anything here, if the fields are somehow unequal?
// Like, is there any case where this is not true? I don't think it's possible for it to be.
// if it hasn't changed, no need to do anything
// except copy over the flags
Hash to = leaseSet.getReceivedBy();
if (to != null) {
rv.setReceivedBy(to);
} else if (leaseSet.getReceivedAsReply()) {
rv.setReceivedAsReply();
}
if (leaseSet.getReceivedAsPublished()) {
rv.setReceivedAsPublished();
}
return rv;
}
} catch (ClassCastException cce) {
throw new IllegalArgumentException("Attempt to replace RI with " + leaseSet);

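A self-contained sketch of the design choice described in the note above: store the whole incoming entry, then re-apply only the delivery flags, so a crafted entry cannot smuggle state across contexts. LeaseSet and the data store are reduced to stand-in types:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch: replace the cached entry wholesale, then copy delivery flags.
    public class FlagCopySketch {
        static class Entry {
            String receivedBy; // stand-in for Hash getReceivedBy()
            boolean receivedAsReply, receivedAsPublished;
        }

        static final Map<String, Entry> ds = new HashMap<>();

        static Entry storeAndCopyFlags(String key, Entry incoming) {
            ds.put(key, incoming); // replace the cached entry wholesale
            Entry rv = ds.get(key);
            if (incoming.receivedBy != null)
                rv.receivedBy = incoming.receivedBy;
            else if (incoming.receivedAsReply)
                rv.receivedAsReply = true;
            if (incoming.receivedAsPublished)
                rv.receivedAsPublished = true;
            return rv;
        }

        public static void main(String[] args) {
            Entry e = new Entry();
            e.receivedAsReply = true;
            System.out.println(storeAndCopyFlags("k", e).receivedAsReply); // true
        }
    }
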
@ -396,13 +396,7 @@ public class PersistentDataStore extends TransientDataStore {
public void wakeup() {
requeue(0);
}

private void setNetDbReady() {
// Only the floodfill netDb needs to call Router::setNetDbReady()
if (_facade._dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID))
_context.router().setNetDbReady();
}

private void readFiles() {
int routerCount = 0;

@ -467,11 +461,11 @@ public class PersistentDataStore extends TransientDataStore {
// This is enough to let i2ptunnel get started.
// Do not set _initialized yet so we don't start rescanning.
_setNetDbReady = true;
setNetDbReady();
_context.router().setNetDbReady();
} else if (i == 500 && !_setNetDbReady) {
// do this for faster systems also at 500
_setNetDbReady = true;
setNetDbReady();
_context.router().setNetDbReady();
}
}
}
@ -479,35 +473,23 @@ public class PersistentDataStore extends TransientDataStore {

if (!_initialized) {
_initialized = true;
if (_facade.isClientDb()) {
_lastReseed = _context.clock().now();
_setNetDbReady = true;
setNetDbReady();
} else if (_facade.isMultihomeDb()) {
_lastReseed = _context.clock().now();
_setNetDbReady = true;
setNetDbReady();
} else if (_facade.reseedChecker().checkReseed(routerCount)) {
if (_facade.reseedChecker().checkReseed(routerCount)) {
_lastReseed = _context.clock().now();
// checkReseed will call wakeup() when done and we will run again
} else {
_setNetDbReady = true;
setNetDbReady();
_context.router().setNetDbReady();
}
} else if (_lastReseed < _context.clock().now() - MIN_RESEED_INTERVAL) {
int count = Math.min(routerCount, size());
if (_facade.isClientDb()) {
_lastReseed = _context.clock().now();
} else if (_facade.isMultihomeDb()) {
_lastReseed = _context.clock().now();
} else if (count < MIN_ROUTERS) {
if (count < MIN_ROUTERS) {
if (_facade.reseedChecker().checkReseed(count))
_lastReseed = _context.clock().now();
// checkReseed will call wakeup() when done and we will run again
} else {
if (!_setNetDbReady) {
_setNetDbReady = true;
setNetDbReady();
_context.router().setNetDbReady();
}
}
} else {

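A sketch of the simplified first-scan decision after this hunk: with the client and multihome special cases removed, a facade either kicks off a reseed (and re-runs when it finishes) or declares the netDb ready. The MIN_ROUTERS value below is an assumption for illustration, not the router's constant:

    // Sketch: either reseed or report the netDb ready; names are stand-ins.
    public class ReseedDecisionSketch {
        static final int MIN_ROUTERS = 50; // assumption for illustration
        static boolean setNetDbReady;
        static long lastReseed;

        static boolean checkReseed(int routerCount) { return routerCount < MIN_ROUTERS; }

        static void onFirstScan(int routerCount) {
            if (checkReseed(routerCount)) {
                lastReseed = System.currentTimeMillis(); // wakeup() re-runs us later
            } else {
                setNetDbReady = true;
                System.out.println("netDb ready"); // stand-in for router().setNetDbReady()
            }
        }

        public static void main(String[] args) {
            onFirstScan(500);
        }
    }
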
@ -87,11 +87,11 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
try {
if (entry.isRouterInfo()) {
RouterInfo ri = (RouterInfo) entry;
getContext().netDbSegmentor().getSubNetDB(_facade._dbid).store(ri.getHash(), ri);
_facade.store(ri.getHash(), ri);
}
if (entry.isLeaseSet()) {
LeaseSet ls = (LeaseSet) entry;
getContext().netDbSegmentor().getSubNetDB(_facade._dbid).store(ls.getHash(), ls);
_facade.store(ls.getHash(), ls);
}
} catch (UnsupportedCryptoException iae) {
// don't blame the peer

@ -18,6 +18,7 @@ import net.i2p.router.Job;
import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.reseed.ReseedChecker;
import net.i2p.util.Log;

/**
* SegmentedNetworkDatabaseFacade
@ -59,22 +60,28 @@ public abstract class SegmentedNetworkDatabaseFacade {
public SegmentedNetworkDatabaseFacade(RouterContext context) {
// super(context, null);
}

/**
* Get a sub-netDb using a string identifier
* Determine whether to use subDb defenses at all or to use the extant FNDF/RAP/RAR defenses
*
* @return true if using subDbs, false if not
* @since 0.9.60
*/
protected abstract FloodfillNetworkDatabaseFacade getSubNetDB(String dbid);
public boolean useSubDbs() {
return false;
}

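A hedged sketch of the extension point this adds: the base facade answers false (no subDb segmentation), and a segmentor subclass is expected to override it. The constructor flag below is an illustrative assumption, not the real FloodfillNetworkDatabaseSegmentor logic:

    // Sketch: base default vs. an overriding segmentor implementation.
    public class UseSubDbsSketch {
        static abstract class SegmentedFacadeSketch {
            public boolean useSubDbs() { return false; } // base default, as in the diff
        }

        static class SegmentorSketch extends SegmentedFacadeSketch {
            private final boolean enabled;
            SegmentorSketch(boolean enabled) { this.enabled = enabled; }
            @Override public boolean useSubDbs() { return enabled; }
        }

        public static void main(String[] args) {
            System.out.println(new SegmentorSketch(true).useSubDbs());  // true
            System.out.println(new SegmentorSketch(false).useSubDbs()); // false
        }
    }
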
/**
* Get a sub-netDb using a Hash identifier
*
* @return client subDb for hash, or null if it does not exist
* @since 0.9.60
*/
protected abstract FloodfillNetworkDatabaseFacade getSubNetDB(Hash dbid);
/**
* Get the main netDb, the one which is used if we're a floodfill
*
* @return may be null if main netDb is not initialized
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade mainNetDB();
@ -82,79 +89,77 @@ public abstract class SegmentedNetworkDatabaseFacade {
* Get the multihome netDb, the one which is used if we're a floodfill AND we
* have a multihome address sent to us
*
* @return may be null if the multihome netDb is not initialized
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade multiHomeNetDB();
/**
* Get a client netDb for a given client string identifier. Will never
* return the mainNetDB.
*
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade clientNetDB(String dbid);
/**
* Get a client netDb for a given client Hash identifier. Will never
* return the mainNetDB.
*
* @return may be null if the client netDb does not exist
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade clientNetDB(Hash dbid);
/**
* Shut down the network database and all subDbs.
* Shut down the network databases
*
* @since 0.9.60
*/
public abstract void shutdown();
/**
* Start up the network databases
*
* @since 0.9.60
*/
public abstract void startup();
/**
* Lookup the leaseSet for a given key in only client dbs.
*
* @return may be null
* @since 0.9.60
*/
public abstract LeaseSet lookupLeaseSetHashIsClient(Hash key);
/**
* Lookup the leaseSet for a given key locally across all dbs if dbid is
* null, or locally for the given dbid if it is not null. Use carefully,
* this function crosses db boundaries and is intended only for local use.
*
* @since 0.9.60
*/
protected abstract LeaseSet lookupLeaseSetLocally(Hash key, String dbid);
/**
* Lookup the dbid for a given hash.
*
* @since 0.9.60
*/
public abstract String getDbidByHash(Hash clientKey);
/**
* Get a set of all sub-netDbs.
*
* @return all the sub netDbs including the main
* @since 0.9.60
*/
public abstract Set<FloodfillNetworkDatabaseFacade> getSubNetDBs();
/**
* Get a set of all client dbid strings
* Make sure the SNDF is initialized. This is overridden in
* FloodfillNetworkDatabaseSegmentor so that it will be false until
* *all* required subDbs are initialized.
*
* @return true if the netDbs are initialized
* @since 0.9.60
*/
public abstract List<String> getClients();
/**
* Make sure the SNDF is initialized
*/
public boolean isInitialized() {
return mainNetDB().isInitialized();
}

/**
* Get a set of all routers
* list all of the RouterInfo objects known to all of the subDbs including
* the main subDb.
*
* @return all of the RouterInfo objects known to all of the netDbs. non-null
* @since 0.9.60
*/
public Set<RouterInfo> getRouters() {
return mainNetDB().getRouters();
Set<RouterInfo> rv = new HashSet<>();
for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
rv.addAll(subdb.getRouters());
}
return rv;
}

/**
* Get a set of all routers known to clients, which should always be zero.
* list of the RouterInfo objects for all known peers in all client
* subDbs which is mostly pointless because they should normally reject
* them anyway.
*
* @return non-null all the routerInfos in all of the client netDbs *only*
* @since 0.9.60
*/
public Set<RouterInfo> getRoutersKnownToClients() {
@ -167,8 +172,11 @@ public abstract class SegmentedNetworkDatabaseFacade {
}

/**
* Get a set of all leases known to all clients.
* Get a set of all leases known to all clients. These will be
* leaseSets for destinations that the clients communicate with
* and the leaseSet of the client itself.
*
* @return non-null. all the leaseSets known to all of the client netDbs
* @since 0.9.60
*/
public Set<LeaseSet> getLeasesKnownToClients() {
@ -181,7 +189,8 @@ public abstract class SegmentedNetworkDatabaseFacade {
}
/**
* Check if the mainNetDB needs to reseed
*
*
* @return non-null.
* @since 0.9.60
* */
public ReseedChecker reseedChecker() {
@ -190,14 +199,16 @@ public abstract class SegmentedNetworkDatabaseFacade {
/**
* For console ConfigKeyringHelper
*
* @return non-null
* @since 0.9.60
*/
public List<String> lookupClientBySigningPublicKey(SigningPublicKey spk) {
public List<Hash> lookupClientBySigningPublicKey(SigningPublicKey spk) {
return Collections.emptyList();
}
/**
* For console ConfigKeyringHelper
*
* @return non-null
* @since 0.9.60
*/
public List<BlindData> getLocalClientsBlindData() {

@ -10,6 +10,7 @@ package net.i2p.router.startup;

import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;

/** start up the network database */
class BootNetworkDbJob extends JobImpl {
@ -21,6 +22,6 @@ class BootNetworkDbJob extends JobImpl {
public String getName() { return "Boot Network Database"; }

public void runJob() {
getContext().netDb().startup();
getContext().netDbSegmentor().startup();
}
}

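A sketch of what the one-line change above implies: booting the segmentor rather than a single netDb means startup() fans out to every sub-netDb. The fan-out itself is an assumption about the segmentor's behavior, with Runnable standing in for the facade type:

    import java.util.Arrays;
    import java.util.List;

    // Sketch: start each sub-netDb in turn.
    public class SegmentorStartupSketch {
        static void startup(List<Runnable> subNetDbs) {
            for (Runnable db : subNetDbs)
                db.run(); // stand-in for FloodfillNetworkDatabaseFacade.startup()
        }

        public static void main(String[] args) {
            List<Runnable> dbs = Arrays.asList(
                    () -> System.out.println("main netDb up"),
                    () -> System.out.println("client netDb up"));
            startup(dbs);
        }
    }
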
@ -242,13 +242,13 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
// Client tunnel messages need explicit handling
// in the context of the client subDb.
if (_client != null) {
String dbid = _context.netDbSegmentor().getDbidByHash(_client);
if (dbid == null) {
//Hash dbid = _context.netDbSegmentor().getDbidByHash(_client);
/*if (dbid == null) {
// This error shouldn't occur. All clients should have their own netDb.
if (_log.shouldLog(Log.ERROR))
_log.error("Error, client (" + _clientNickname + ") dbid not found while processing messages in the IBMD.");
return;
}
}*/
// For now, the only client message we know how to handle here is a DSM.
// There aren't normally DSM messages here, but it should be safe to store
// them in the client netDb.
@ -391,7 +391,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
_log.info("Storing garlic LS down tunnel for: " + dsm.getKey() + " sent to: "
+ _clientNickname + " ("
+ (_client != null ? _client.toBase32() : ") router"));
if (_client.toBase32() != null) {
if (_client != null) {
// We need to replicate some of the handling that was previously
// performed when these types of messages were passed back to
// the inNetMessagePool.

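A sketch of the guard corrected in the last hunk: the old condition dereferenced _client before testing it, so a router-bound delivery (null client) would throw a NullPointerException; the fixed check tests the reference itself. Object stands in for the client Hash:

    // Sketch: test the reference, not a value derived from it.
    public class ClientGuardSketch {
        static boolean isClientBound(Object client) {
            // old (buggy): client.toBase32() != null  -- NPEs when client == null
            return client != null;
        }

        public static void main(String[] args) {
            System.out.println(isClientBound(null));         // false, no NPE
            System.out.println(isClientBound(new Object())); // true
        }
    }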