forked from I2P_Developers/i2p.i2p
Compare commits: i2p.i2p.ne...i2p.i2p.2. (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | c2b877e932 | |
| | c538ba69b5 | |
@@ -375,7 +375,18 @@ public class RouterContext extends I2PAppContext {
public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; }
public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); }
public FloodfillNetworkDatabaseFacade multihomeNetDb() { return _netDb.multiHomeNetDB(); }

/**
* Get the client netDb for the given id.
* Will return the main netDb if
* the dbid is null or the client db is not found.
*
* @param id may be null
* @return non-null
* @since 0.9.60
*/
public FloodfillNetworkDatabaseFacade clientNetDb(Hash id) { return _netDb.clientNetDB(id); }

/**
* The actual driver of the router, where all jobs are enqueued and processed.
*/
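The accessors above are what the rest of this changeset calls into. A method-level caller sketch (hypothetical helper, names assumed, not part of the diff):

```java
// Hypothetical helper, not part of this commit: pick a netDb via the accessors above.
FloodfillNetworkDatabaseFacade pickDb(RouterContext ctx, Hash clientId) {
    if (clientId == null)
        return ctx.netDb();            // main netDb
    return ctx.clientNetDb(clientId);  // per the javadoc: falls back to the main netDb if no client db exists
}
```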
@@ -49,9 +49,8 @@ import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.crypto.TransientSessionKeyManager;
import net.i2p.router.crypto.ratchet.RatchetSKM;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;
import net.i2p.router.crypto.ratchet.MuxedSKM;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;

@@ -160,8 +159,6 @@ class ClientConnectionRunner {
_alreadyProcessed = new ArrayList<MessageId>();
_acceptedPending = new ConcurrentHashSet<MessageId>();
_messageId = new AtomicInteger(_context.random().nextInt());
// Set up the per-destination FloodfillNetworkDatabaseFacade to prevent clients from being able to
// update leaseSet entries in the floodfill netDb
}

private static final AtomicInteger __id = new AtomicInteger();

@@ -213,9 +210,6 @@ class ClientConnectionRunner {
_acceptedPending.clear();
if (_sessionKeyManager != null)
_sessionKeyManager.shutdown();
if (_floodfillNetworkDatabaseFacade != null)
if (_floodfillNetworkDatabaseFacade.isClientDb())
_floodfillNetworkDatabaseFacade.shutdown();
if (_encryptedLSHash != null)
_manager.unregisterEncryptedDestination(this, _encryptedLSHash);
_manager.unregisterConnection(this);

@@ -226,12 +220,12 @@ class ClientConnectionRunner {
// _sessions will be empty.
for (SessionParams sp : _sessions.values()) {
LeaseSet ls = sp.currentLeaseSet;
if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
getFloodfillNetworkDatabaseFacade().unpublish(ls);
if (ls != null)
_context.netDb().unpublish(ls);
// unpublish encrypted LS also
ls = sp.currentEncryptedLeaseSet;
if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
getFloodfillNetworkDatabaseFacade().unpublish(ls);
if (ls != null)
_context.netDb().unpublish(ls);
if (!sp.isPrimary)
_context.tunnelManager().removeAlias(sp.dest);
}

@@ -242,6 +236,8 @@ class ClientConnectionRunner {
sp.rerequestTimer.cancel();
}
}
if (_floodfillNetworkDatabaseFacade != null)
_floodfillNetworkDatabaseFacade.shutdown();
synchronized (_alreadyProcessed) {
_alreadyProcessed.clear();
}

@@ -467,12 +463,12 @@ class ClientConnectionRunner {
// Tell client manager
_manager.unregisterSession(id, sp.dest);
LeaseSet ls = sp.currentLeaseSet;
if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
getFloodfillNetworkDatabaseFacade().unpublish(ls);
if (ls != null && _floodfillNetworkDatabaseFacade != null)
_floodfillNetworkDatabaseFacade.unpublish(ls);
// unpublish encrypted LS also
ls = sp.currentEncryptedLeaseSet;
if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
getFloodfillNetworkDatabaseFacade().unpublish(ls);
if (ls != null && _floodfillNetworkDatabaseFacade != null)
_floodfillNetworkDatabaseFacade.unpublish(ls);
isPrimary = sp.isPrimary;
if (isPrimary)
_context.tunnelManager().removeTunnels(sp.dest);

@@ -492,12 +488,12 @@ class ClientConnectionRunner {
_log.info("Destroying remaining client subsession " + sp.sessionId);
_manager.unregisterSession(sp.sessionId, sp.dest);
LeaseSet ls = sp.currentLeaseSet;
if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
getFloodfillNetworkDatabaseFacade().unpublish(ls);
if (ls != null && _floodfillNetworkDatabaseFacade != null)
_floodfillNetworkDatabaseFacade.unpublish(ls);
// unpublish encrypted LS also
ls = sp.currentEncryptedLeaseSet;
if (ls != null && getFloodfillNetworkDatabaseFacade() != null)
getFloodfillNetworkDatabaseFacade().unpublish(ls);
if (ls != null && _floodfillNetworkDatabaseFacade != null)
_floodfillNetworkDatabaseFacade.unpublish(ls);
_context.tunnelManager().removeAlias(sp.dest);
synchronized(this) {
if (sp.rerequestTimer != null)

@@ -572,18 +568,6 @@ class ClientConnectionRunner {
public int sessionEstablished(SessionConfig config) {
Destination dest = config.getDestination();
Hash destHash = dest.calculateHash();
if (destHash != null){
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("Initializing subDb for client" + destHash);
}
_floodfillNetworkDatabaseFacade = new FloodfillNetworkDatabaseFacade(_context, destHash);
_floodfillNetworkDatabaseFacade.startup();
} else {
if (_log.shouldLog(Log.ERROR)) {
_log.error("Initializing subDb for unknown client" + dest, new Exception());
}
_floodfillNetworkDatabaseFacade = null;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("SessionEstablished called for destination " + destHash);
if (_sessions.size() > MAX_SESSIONS)

@@ -610,6 +594,7 @@ class ClientConnectionRunner {
_dontSendMSM = "none".equals(opts.getProperty(I2PClient.PROP_RELIABILITY, "").toLowerCase(Locale.US));
_dontSendMSMOnReceive = Boolean.parseBoolean(opts.getProperty(I2PClient.PROP_FAST_RECEIVE));
}

// Set up the
// per-destination session key manager to prevent rather easy correlation
// based on the specified encryption types in the config

@@ -661,6 +646,12 @@ class ClientConnectionRunner {
}
}
}
if (isPrimary && _floodfillNetworkDatabaseFacade == null) {
if (_log.shouldDebug())
_log.debug("Initializing subDb for client" + destHash);
_floodfillNetworkDatabaseFacade = new FloodfillNetworkDatabaseFacade(_context, destHash);
_floodfillNetworkDatabaseFacade.startup();
}
return _manager.destinationEstablished(this, dest);
}

@@ -1172,29 +1163,15 @@ class ClientConnectionRunner {

/**
* Get the FloodfillNetworkDatabaseFacade for this runner. This is the client
* netDb if the router is configured to use subDbs, or the main netDb if the
* router is configured to use a monolithic netDb.
* netDb.
*
* If neither a client netDb or the main netDb is available, it will return null.
* This should be impossible.
* If you get the `getFloodfillNetworkDatabaseFacade is null for runner` warning,
* the main netDb will be returned instead. If the main netDb is null, then null
* will be returned.
* If a session has not been created yet, it will return null.
*
* @return _floodfillNetworkDatabaseFacade
* @return the client netdb or null if no session was created yet
* @since 0.9.60
*/
public FloodfillNetworkDatabaseFacade getFloodfillNetworkDatabaseFacade() {
if (!_context.netDbSegmentor().useSubDbs())
return _context.netDb();
if (_log.shouldLog(Log.DEBUG))
_log.debug("getFloodfillNetworkDatabaseFacade is getting the subDb for dbid: " + this.getDestHash());
if (_floodfillNetworkDatabaseFacade == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("getFloodfillNetworkDatabaseFacade is null for runner");
return _context.netDb();
}
return this._floodfillNetworkDatabaseFacade;
return _floodfillNetworkDatabaseFacade;
}

private class MessageDeliveryStatusUpdate extends JobImpl {
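The shutdown and unpublish paths above now null-check the per-client facade directly, since getFloodfillNetworkDatabaseFacade() can return null before a session exists. A method-level sketch of that caller pattern (hypothetical helper, names assumed, not part of the diff):

```java
// Hypothetical caller, not part of this commit: unpublish against whichever
// facade is available, falling back to the main netDb when the runner has
// no per-client netDb yet.
void unpublishSafely(RouterContext ctx, ClientConnectionRunner runner, LeaseSet ls) {
    if (ls == null)
        return;
    FloodfillNetworkDatabaseFacade db = runner.getFloodfillNetworkDatabaseFacade();
    if (db != null)
        db.unpublish(ls);
    else
        ctx.netDb().unpublish(ls);
}
```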
@@ -777,8 +777,9 @@ class ClientManager {
* get the FloodfillNetworkDatabaseFacade associated with a particular client destination.
* This is inside the runner, so it won't be there if the runner isn't ready.
*
* @param destHash destination hash associated with the client whose subDb we're looking for
* @return may be null if it does not exist and the main netDb is not initialized
* @param destHash destination hash associated with the client whose subDb we're looking for, may be null
* @return will be null if destHash is null or client does not exist or its netDb is not initialized
* @since 0.9.60
*/
public FloodfillNetworkDatabaseFacade getClientFloodfillNetworkDatabaseFacade(Hash destHash) {
if (destHash != null) {

@@ -801,15 +802,14 @@ class ClientManager {
* get all of the FloodfillNetworkDatabaseFacades for all of the clients.
*
* @return non-null
* @since 0.9.60
*/
public Set<FloodfillNetworkDatabaseFacade> getClientFloodfillNetworkDatabaseFacades() {
Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<FloodfillNetworkDatabaseFacade>();
for (ClientConnectionRunner runner : _runners.values()) {
if (runner != null){
FloodfillNetworkDatabaseFacade fndf = runner.getFloodfillNetworkDatabaseFacade();
if (fndf != null)
rv.add(fndf);
}
FloodfillNetworkDatabaseFacade fndf = runner.getFloodfillNetworkDatabaseFacade();
if (fndf != null)
rv.add(fndf);
}
return rv;
}

@@ -817,7 +817,8 @@ class ClientManager {
/**
* get all the primary hashes for all the clients and return them as a set
*
* @return
* @return non-null
* @since 0.9.60
*/
public Set<Hash> getPrimaryHashes() {
Set<Hash> rv = new HashSet<Hash>();
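Per the updated javadoc, the per-client getter may return null while the bulk getter is always non-null. A method-level usage sketch (hypothetical caller, names assumed, not part of the diff):

```java
// Hypothetical caller, not part of this commit.
FloodfillNetworkDatabaseFacade resolveClientDb(RouterContext ctx, ClientManager manager, Hash destHash) {
    FloodfillNetworkDatabaseFacade fndf = manager.getClientFloodfillNetworkDatabaseFacade(destHash);
    if (fndf == null) {
        // destHash was null, the client is unknown, or its netDb is not initialized yet
        return ctx.netDb();
    }
    return fndf;
}
```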
@@ -861,9 +861,9 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
_log.warn("Unsupported BlindingInfo type: " + message);
return;
}
BlindData obd = _runner.getFloodfillNetworkDatabaseFacade().getBlindData(spk);
BlindData obd = _context.netDb().getBlindData(spk);
if (obd == null) {
_runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd);
_context.netDb().setBlindData(bd);
if (_log.shouldWarn())
_log.warn("New: " + bd);
} else {

@@ -884,7 +884,7 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
return;
}
}
_runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd);
_context.netDb().setBlindData(bd);
if (_log.shouldWarn())
_log.warn("Updated: " + bd);
} else {

@@ -893,7 +893,7 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
if (nexp > oexp) {
obd.setExpiration(nexp);
// to force save at shutdown
_runner.getFloodfillNetworkDatabaseFacade().setBlindData(obd);
_context.netDb().setBlindData(obd);
if (_log.shouldWarn())
_log.warn("Updated expiration: " + obd);
} else {
@@ -21,6 +21,7 @@ import net.i2p.data.i2cp.I2CPMessage;
import net.i2p.data.i2cp.I2CPMessageException;
import net.i2p.data.i2cp.SessionId;
import net.i2p.router.JobImpl;
import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;

@@ -91,7 +92,7 @@ class LookupDestJob extends JobImpl {
try {
bd = Blinding.decode(context, b);
SigningPublicKey spk = bd.getUnblindedPubKey();
BlindData bd2 = _runner.getFloodfillNetworkDatabaseFacade().getBlindData(spk);
BlindData bd2 = getContext().netDb().getBlindData(spk);
if (bd2 != null) {
// BlindData from database may have privkey or secret
// check if we need it but don't have it

@@ -110,7 +111,7 @@ class LookupDestJob extends JobImpl {
long exp = now + ((bd.getAuthRequired() || bd.getSecretRequired()) ? 365*24*60*60*1000L
: 90*24*68*60*1000L);
bd.setExpiration(exp);
_runner.getFloodfillNetworkDatabaseFacade().setBlindData(bd);
getContext().netDb().setBlindData(bd);
}
h = bd.getBlindedHash();
if (_log.shouldDebug())

@@ -185,7 +186,10 @@ class LookupDestJob extends JobImpl {
if (timeout > 1500)
timeout -= 500;
// TODO tell router this is an encrypted lookup, skip 38 or earlier ffs?
_runner.getFloodfillNetworkDatabaseFacade().lookupDestination(_hash, done, timeout, _fromLocalDest);
NetworkDatabaseFacade db = _runner.getFloodfillNetworkDatabaseFacade();
if (db == null)
db = getContext().netDb();
db.lookupDestination(_hash, done, timeout, _fromLocalDest);
} else {
// blinding decode fail
returnFail(HostReplyMessage.RESULT_DECRYPTION_FAILURE);

@@ -204,10 +208,13 @@ class LookupDestJob extends JobImpl {
}
public String getName() { return "LeaseSet Lookup Reply to Client"; }
public void runJob() {
Destination dest = _runner.getFloodfillNetworkDatabaseFacade().lookupDestinationLocally(_hash);
NetworkDatabaseFacade db = _runner.getFloodfillNetworkDatabaseFacade();
if (db == null)
db = getContext().netDb();
Destination dest = db.lookupDestinationLocally(_hash);
if (dest == null && _blindData != null) {
// TODO store and lookup original hash instead
LeaseSet ls = _runner.getFloodfillNetworkDatabaseFacade().lookupLeaseSetLocally(_hash);
LeaseSet ls = db.lookupLeaseSetLocally(_hash);
if (ls != null && ls.getType() == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
// already decrypted
EncryptedLeaseSet encls = (EncryptedLeaseSet) ls;
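Both hunks above repeat the same fallback: use the runner's facade when present, otherwise the main netDb. If the pattern spreads further it could be factored into a small helper; a sketch (hypothetical method, not in the diff):

```java
// Hypothetical helper capturing the fallback used above.
private NetworkDatabaseFacade lookupDb() {
    NetworkDatabaseFacade db = _runner.getFloodfillNetworkDatabaseFacade();
    return (db != null) ? db : getContext().netDb();
}
```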
@@ -147,10 +147,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// Only send it out if it is in our estimated keyspace.
// For this, we do NOT use their dontInclude list as it can't be trusted
// (i.e. it could mess up the closeness calculation)
LeaseSet possibleMultihomed = null;
/*LeaseSet possibleMultihomed = null;
if (getContext().netDbSegmentor().useSubDbs()) {
possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
}
*/
Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
CLOSENESS_THRESHOLD, null);
if (weAreClosest(closestHashes)) {

@@ -165,7 +166,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
_log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
sendData(searchKey, ls, fromKey, toTunnel);
} else if (getContext().netDbSegmentor().useSubDbs() && possibleMultihomed != null) {
/*} else if (getContext().netDbSegmentor().useSubDbs() && possibleMultihomed != null) {
// If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
// always receivedAsPublished. No need to decide whether or not to answer the request like above, just
// answer it so it doesn't look different from other stores.

@@ -174,7 +175,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
_log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
}
}*/
} else {
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))

@@ -184,7 +185,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
LeaseSet possibleMultihomed = null;
/*LeaseSet possibleMultihomed = null;
if (getContext().netDbSegmentor().useSubDbs()) {
possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
}

@@ -195,7 +196,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
}
} else {
} else {*/
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.

@@ -207,7 +208,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
/*}*/
}
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
lookupType != DatabaseLookupMessage.Type.LS) {
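With the multihome branch commented out, answering a lease set lookup depends only on the keyspace check against the main netDb. The weAreClosest() helper is not shown in this diff; its assumed shape is roughly:

```java
// Assumed shape of weAreClosest(), not shown in this diff: answer only if our
// own router hash is among the closest hashes returned by the main netDb.
private boolean weAreClosest(Set<Hash> closestHashes) {
    return closestHashes.contains(getContext().routerHash());
}
```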
@@ -74,9 +74,9 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
_multihomeDbid = new FloodfillNetworkDatabaseFacade(_context, MULTIHOME_DBID);
}

public boolean useSubDbs() {
return _context.getProperty(PROP_NETDB_ISOLATION, true);
}
//public boolean useSubDbs() {
//return _context.getProperty(PROP_NETDB_ISOLATION, true);
//}

/**
* Retrieves the FloodfillNetworkDatabaseFacade object for the specified ID.

@@ -87,8 +87,8 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
*/
@Override
protected FloodfillNetworkDatabaseFacade getSubNetDB(Hash id) {
if (!useSubDbs())
return _mainDbid;
/*if (!useSubDbs())
return _mainDbid;*/
return _context.clientManager().getClientFloodfillNetworkDatabaseFacade(id);
}

@@ -103,8 +103,8 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
if (_log.shouldLog(Log.DEBUG))
_log.debug("shutdown called from FNDS, shutting down main and multihome db");
_mainDbid.shutdown();
if (useSubDbs())
_multihomeDbid.shutdown();
/*if (!useSubDbs())
_multihomeDbid.shutdown();*/
}

/**

@@ -117,8 +117,8 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
if (_log.shouldLog(Log.DEBUG))
_log.debug("startup called from FNDS, starting up main and multihome db");
_mainDbid.startup();
if (useSubDbs())
_multihomeDbid.startup();
/*if (useSubDbs())
_multihomeDbid.startup();*/
}

/**

@@ -268,25 +268,26 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
}

/**
* get the client netDb for the given id
* Will return the "exploratory(default client)" netDb if
* the dbid is null.
* Get the client netDb for the given id.
* Will return the main netDb if
* the dbid is null or the client db is not found.
*
* @param id may be null
* @return non-null
* @since 0.9.60
* @return may be null if the client netDb does not exist
*/
@Override
public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
if (_log.shouldDebug())
_log.debug("looked up clientNetDB: " + id);
if (!useSubDbs())
return _mainDbid;
/*if (!useSubDbs())
return _mainDbid;*/
if (id != null){
FloodfillNetworkDatabaseFacade fndf = getSubNetDB(id);
if (fndf != null)
return fndf;
}
return mainNetDB();
return _mainDbid;
}

/**

@@ -328,10 +329,10 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
if (!_mainDbid.isInitialized())
return Collections.emptySet();
Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
if (!useSubDbs()) {
/*if (!useSubDbs()) {
rv.add(_mainDbid);
return rv;
}
}*/
rv.add(_mainDbid);
rv.add(multiHomeNetDB());
rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());

@@ -350,10 +351,10 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
if (!_mainDbid.isInitialized())
return Collections.emptySet();
Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
if (!useSubDbs()) {
/*if (!useSubDbs()) {
rv.add(_mainDbid);
return rv;
}
}*/
rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
return rv;
}
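After the edits above, clientNetDB() no longer consults useSubDbs(); it resolves through ClientManager and falls back to the main db. The logic is equivalent to this compact sketch (restatement only, debug logging omitted, not part of the diff):

```java
// Equivalent restatement of the clientNetDB() logic above.
public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
    FloodfillNetworkDatabaseFacade fndf = (id != null) ? getSubNetDB(id) : null;
    return (fndf != null) ? fndf : _mainDbid;
}
```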
@@ -148,12 +148,13 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
blockStore = false;
if (blockStore) {
getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
// If we're using subdbs, store the leaseSet in the multihome DB.
// If we're not using subdbs, store the leaseSet in the multihome DB. (disabled/commented out below)
// otherwise, throw rather than return, so that we send the ack below (prevent easy attack)
// We should actually never hit this code unless we're using a monolithic netDb, which is now disabled.
dontBlamePeer = true;
if (getContext().netDbSegmentor().useSubDbs())
/*if (getContext().netDbSegmentor().useSubDbs())
getContext().multihomeNetDb().store(key, ls);
else
else*/
throw new IllegalArgumentException("(dbid: " + _facade._dbid
+ ") Peer attempted to store local leaseSet: "
+ key.toBase32());
@@ -281,6 +281,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
* Cannot be restarted.
*/
public synchronized void shutdown() {
if (_log.shouldWarn())
_log.warn("DB shutdown " + this);
_initialized = false;
if (!_context.commSystem().isDummy() && isMainDb() &&
_context.router().getUptime() > ROUTER_INFO_EXPIRATION_FLOODFILL + 10*60*1000 + 60*1000) {

@@ -376,7 +378,8 @@ public abstract class KademliaNetworkDatabaseFacad
}

public synchronized void startup() {
_log.info("Starting up the kademlia network database");
if (_log.shouldInfo())
_log.info("Starting up the " + this);
RouterInfo ri = _context.router().getRouterInfo();
String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
_kb = new KBucketSet<Hash>(_context, ri.getIdentity().getHash(),

@@ -885,7 +888,7 @@ public abstract class KademliaNetworkDatabaseFacad
_log.error("locally published leaseSet is not valid?", iae);
throw iae;
}
if (!_context.netDbSegmentor().useSubDbs()){
//if (!_context.netDbSegmentor().useSubDbs()){
String dbid = "main netDb";
if (isClientDb()) {
dbid = "client netDb: " + _dbid;

@@ -903,7 +906,7 @@ public abstract class KademliaNetworkDatabaseFacad
if (_log.shouldLog(Log.INFO))
_log.info("[" + dbid + "]" + "Local client LS key initialized to: " + _localKey);
}
}
//}
if (!_context.clientManager().shouldPublishLeaseSet(h))
return;
// If we're exiting, don't publish.

@@ -1702,4 +1705,16 @@ public abstract class KademliaNetworkDatabaseFacad
public void renderStatusHTML(Writer out) throws IOException {
out.write(_kb.toString().replace("\n", "<br>\n"));
}

/**
* @since 0.9.60
*/
@Override
public String toString() {
if (isMainDb())
return "Main NetDB";
if (isMultihomeDb())
return "Multihome NetDB";
return "Client NetDB " + _dbid.toBase64();
}
}
@@ -67,9 +67,7 @@ public abstract class SegmentedNetworkDatabaseFacade {
* @return true if using subDbs, false if not
* @since 0.9.60
*/
public boolean useSubDbs() {
return false;
}
//public abstract boolean useSubDbs();

/**
* Get a sub-netDb using a Hash identifier
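useSubDbs() is now a concrete base-class method returning false, replacing the abstract declaration; together with the override commented out in FloodfillNetworkDatabaseSegmentor above, this disables netDb segmentation by default. If it were ever re-enabled, a subclass override would look roughly like the removed one (sketch only, property name taken from the commented-out code):

```java
// Sketch only: re-enabling segmentation would mean overriding the base method,
// e.g. with the property check that was commented out above.
@Override
public boolean useSubDbs() {
    return _context.getProperty(PROP_NETDB_ISOLATION, true);
}
```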
@@ -9,6 +9,7 @@ import net.i2p.data.TunnelId;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;

/**

@@ -115,9 +116,14 @@ public class AliasedTunnelPool extends TunnelPool {

@Override
protected LeaseSet locked_buildNewLeaseSet() {
LeaseSet ls = _context.clientNetDb(_aliasOf.getSettings().getDestination()).lookupLeaseSetLocally(_aliasOf.getSettings().getDestination());
if (ls == null)
Hash primary = _aliasOf.getSettings().getDestination();
FloodfillNetworkDatabaseFacade db = _context.clientNetDb(primary);
LeaseSet ls = db.lookupLeaseSetLocally(primary);
if (ls == null) {
if (_log.shouldWarn())
_log.warn("No primary LS " + primary + " to copy for " + getSettings().getDestination() + " in db " + db);
return null;
}
// copy everything so it isn't corrupted
LeaseSet rv = new LeaseSet();
for (int i = 0; i < ls.getLeaseCount(); i++) {