Compare commits

...

5 Commits

15 changed files with 178 additions and 43 deletions

ClientManagerFacade.java
View File

@@ -121,4 +121,11 @@ public abstract class ClientManagerFacade implements Service {
* @since 0.9.41
*/
public void unregisterMetaDest(Destination dest) {}
/**
* Get the primary DBID of a client, given its destination hash.
*
* @param dest the destination hash of the client
* @return the primary DBID, or null if the client is not connected
* @since 0.9.60
*/
public abstract Hash getPrimaryDbid(Hash dest);
}
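Downstream in this changeset the new method is consumed by resolving a client's sub-netDb through the router context. A minimal caller-side sketch, where ctx, destHash and key are placeholder names not taken from the diff:

// Sketch only: resolve the client's primary DBID, then use its sub-netDb.
// ctx (RouterContext), destHash and key are hypothetical placeholders.
Hash dbid = ctx.clientManager().getPrimaryDbid(destHash); // null if the client is not connected
LeaseSet ls = ctx.clientNetDb(dbid).lookupLeaseSetLocally(key);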

ClientConnectionRunner.java
View File

@@ -179,6 +179,7 @@ class ClientConnectionRunner {
_out = new BufferedOutputStream(_socket.getOutputStream());
_reader.startReading();
// TODO need a cleaner for unclaimed items in _messages, but we have no timestamps...
_context.netDbSegmentor().createClientNetDB(this.getDestHash());
}
/**
@@ -211,7 +212,6 @@ class ClientConnectionRunner {
_manager.unregisterEncryptedDestination(this, _encryptedLSHash);
_manager.unregisterConnection(this);
// netdb may be null in unit tests
Hash dbid = getDestHash();
if (_context.netDbSegmentor() != null) {
// Note that if the client sent us a destroy message,
// removeSession() was called just before this, and
@@ -219,11 +219,11 @@ class ClientConnectionRunner {
for (SessionParams sp : _sessions.values()) {
LeaseSet ls = sp.currentLeaseSet;
if (ls != null)
_context.clientNetDb(dbid).unpublish(ls);
_context.clientNetDb(getDestHash()).unpublish(ls);
// unpublish encrypted LS also
ls = sp.currentEncryptedLeaseSet;
if (ls != null)
_context.clientNetDb(dbid).unpublish(ls);
_context.clientNetDb(getDestHash()).unpublish(ls);
if (!sp.isPrimary)
_context.tunnelManager().removeAlias(sp.dest);
}
@@ -449,7 +449,6 @@ class ClientConnectionRunner {
if (id == null)
return;
boolean isPrimary = false;
Hash dbid = getDestHash();
for (Iterator<SessionParams> iter = _sessions.values().iterator(); iter.hasNext(); ) {
SessionParams sp = iter.next();
if (id.equals(sp.sessionId)) {
@@ -460,11 +459,11 @@ class ClientConnectionRunner {
_manager.unregisterSession(id, sp.dest);
LeaseSet ls = sp.currentLeaseSet;
if (ls != null)
_context.clientNetDb(dbid).unpublish(ls);
_context.clientNetDb(getDestHash()).unpublish(ls);
// unpublish encrypted LS also
ls = sp.currentEncryptedLeaseSet;
if (ls != null)
_context.clientNetDb(dbid).unpublish(ls);
_context.clientNetDb(getDestHash()).unpublish(ls);
isPrimary = sp.isPrimary;
if (isPrimary)
_context.tunnelManager().removeTunnels(sp.dest);
@@ -485,11 +484,11 @@ class ClientConnectionRunner {
_manager.unregisterSession(sp.sessionId, sp.dest);
LeaseSet ls = sp.currentLeaseSet;
if (ls != null)
_context.clientNetDb(dbid).unpublish(ls);
_context.clientNetDb(getDestHash()).unpublish(ls);
// unpublish encrypted LS also
ls = sp.currentEncryptedLeaseSet;
if (ls != null)
_context.clientNetDb(dbid).unpublish(ls);
_context.clientNetDb(getDestHash()).unpublish(ls);
_context.tunnelManager().removeAlias(sp.dest);
synchronized(this) {
if (sp.rerequestTimer != null)

ClientManager.java
View File

@@ -668,6 +668,23 @@ class ClientManager {
return null;
return _runnersByHash.get(destHash);
}
/**
* Get the client's primary DBID.
*
* @param fromDest the destination hash of the client
* @return the primary DBID, or null if no runner is registered for that destination
* @since 0.9.60
*/
public Hash getPrimaryDbid(Hash fromDest) {
// first, get the runner
ClientConnectionRunner runner = getRunner(fromDest);
if (runner != null) {
return runner.getDestHash();
}
return null;
}
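The practical effect, assuming subsession and alias destinations are registered to the same ClientConnectionRunner as the primary session (the names below are hypothetical, not from the diff): every destination handled by one runner maps back to the primary session's DBID, so they all share the primary client's sub-netDb.

// Illustrative sketch only; primaryDest and aliasDest are hypothetical destination
// hashes belonging to the same runner, and manager is a ClientManager instance.
Hash a = manager.getPrimaryDbid(primaryDest); // == runner.getDestHash()
Hash b = manager.getPrimaryDbid(aliasDest);   // same runner, so same DBID
// a.equals(b), so both sessions resolve to the same sub-netDb via ctx.clientNetDb(a)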
/**
* @param id the router's ID for this message

ClientManagerFacadeImpl.java
View File

@@ -220,6 +220,10 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade implements Inte
return null;
}
}
public Hash getPrimaryDbid(Hash dest) {
return _manager.getPrimaryDbid(dest);
}
/**
* Return the client's current manager or null if not connected

ClientMessageEventListener.java
View File

@@ -83,6 +83,7 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
_context = context;
_log = _context.logManager().getLog(ClientMessageEventListener.class);
_runner = runner;
_context.netDbSegmentor().createClientNetDB(_runner.getDestHash());
_enforceAuth = enforceAuth;
if ((!_enforceAuth) || !_context.getBooleanProperty(PROP_AUTH))
_authorized = true;

LookupDestJob.java
View File

@@ -95,7 +95,7 @@ class LookupDestJob extends JobImpl {
if (_fromLocalDest == null)
bd2 = getContext().mainNetDb().getBlindData(spk);
else
bd2 = getContext().clientNetDb(_fromLocalDest).getBlindData(spk);
bd2 = getContext().clientNetDb(_runner.getDestHash()).getBlindData(spk);
if (bd2 != null) {
// BlindData from database may have privkey or secret
// check if we need it but don't have it
@@ -114,7 +114,7 @@ class LookupDestJob extends JobImpl {
long exp = now + ((bd.getAuthRequired() || bd.getSecretRequired()) ? 365*24*60*60*1000L
: 90*24*60*60*1000L);
bd.setExpiration(exp);
getContext().clientNetDb(_fromLocalDest).setBlindData(bd);
getContext().clientNetDb(_runner.getDestHash()).setBlindData(bd);
}
h = bd.getBlindedHash();
if (_log.shouldDebug())
@@ -189,7 +189,7 @@ class LookupDestJob extends JobImpl {
if (timeout > 1500)
timeout -= 500;
// TODO tell router this is an encrypted lookup, skip 38 or earlier ffs?
getContext().clientNetDb(_fromLocalDest).lookupDestination(_hash, done, timeout, _fromLocalDest);
getContext().clientNetDb(_runner.getDestHash()).lookupDestination(_hash, done, timeout, _fromLocalDest);
} else {
// blinding decode fail
returnFail(HostReplyMessage.RESULT_DECRYPTION_FAILURE);
@@ -198,7 +198,7 @@ class LookupDestJob extends JobImpl {
private String toBase32(){
if (_fromLocalDest != null)
return _fromLocalDest.toBase32();
return _runner.getDestHash().toBase32();
return null;
}
@@ -208,10 +208,10 @@
}
public String getName() { return "LeaseSet Lookup Reply to Client"; }
public void runJob() {
Destination dest = getContext().clientNetDb(_fromLocalDest).lookupDestinationLocally(_hash);
Destination dest = getContext().clientNetDb(_runner.getDestHash()).lookupDestinationLocally(_hash);
if (dest == null && _blindData != null) {
// TODO store and lookup original hash instead
LeaseSet ls = getContext().clientNetDb(_fromLocalDest).lookupLeaseSetLocally(_hash);
LeaseSet ls = getContext().clientNetDb(_runner.getDestHash()).lookupLeaseSetLocally(_hash);
if (ls != null && ls.getType() == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
// already decrypted
EncryptedLeaseSet encls = (EncryptedLeaseSet) ls;

DummyClientManagerFacade.java
View File

@@ -49,6 +49,8 @@ public class DummyClientManagerFacade extends ClientManagerFacade {
public SessionKeyManager getClientSessionKeyManager(Hash _dest) { return null; }
public void requestLeaseSet(Hash dest, LeaseSet set) {}
public Hash getPrimaryDbid(Hash dest){
return null;
}
}

DummyNetworkDatabaseFacade.java
View File

@@ -123,6 +123,26 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade {
return _fndb;
}
@Override
public FloodfillNetworkDatabaseFacade createClientNetDB(String dbid) {
throw new UnsupportedOperationException("Unimplemented method 'createClientNetDB'");
}
@Override
public FloodfillNetworkDatabaseFacade createClientNetDB(Hash dbid) {
throw new UnsupportedOperationException("Unimplemented method 'createClientNetDB'");
}
@Override
public void removeClientNetDB(String dbid) {
throw new UnsupportedOperationException("Unimplemented method 'removeClientNetDB'");
}
@Override
public void removeClientNetDB(Hash dbid) {
throw new UnsupportedOperationException("Unimplemented method 'removeClientNetDB'");
}
@Override
public String getDbidByHash(Hash clientKey) {
throw new UnsupportedOperationException("Unimplemented method 'getDbidByHash'");

OutboundClientMessageOneShotJob.java
View File

@@ -209,7 +209,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
_hashPair = new OutboundCache.HashPair(_from.calculateHash(), toHash);
_toString = toHash.toBase32();
// we look up here rather than runJob() so we may adjust the timeout
_leaseSet = ctx.clientNetDb(_from.calculateHash()).lookupLeaseSetLocally(toHash);
_leaseSet = ctx.clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())).lookupLeaseSetLocally(toHash);
// use expiration requested by client if available, otherwise session config,
// otherwise router config, otherwise default
@@ -307,7 +307,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (_log.shouldInfo())
_log.info(getJobId() + ": RAP LS, firing search: " + _leaseSet.getHash().toBase32());
LookupLeaseSetFailedJob failed = new LookupLeaseSetFailedJob(getContext());
getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetRemotely(_leaseSet.getHash(), success, failed,
getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())).lookupLeaseSetRemotely(_leaseSet.getHash(), success, failed,
LS_LOOKUP_TIMEOUT, _from.calculateHash());
} else {
dieFatal(MessageStatusMessage.STATUS_SEND_FAILURE_NO_LEASESET);
@@ -330,7 +330,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
long exp = now - _leaseSet.getLatestLeaseDate();
_log.info(getJobId() + ": leaseSet expired " + DataHelper.formatDuration(exp) + " ago, firing search: " + _leaseSet.getHash().toBase32());
}
getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetRemotely(_leaseSet.getHash(), _from.calculateHash());
getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())).lookupLeaseSetRemotely(_leaseSet.getHash(), _from.calculateHash());
}
}
success.runJob();
@@ -340,7 +340,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
_log.debug(getJobId() + ": Send outbound client message - sending off leaseSet lookup job for " + _toString + " from client " + _from.calculateHash().toBase32());
LookupLeaseSetFailedJob failed = new LookupLeaseSetFailedJob(getContext());
Hash key = _to.calculateHash();
getContext().clientNetDb(_from.calculateHash()).lookupLeaseSet(key, success, failed, LS_LOOKUP_TIMEOUT, _from.calculateHash());
getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())).lookupLeaseSet(key, success, failed, LS_LOOKUP_TIMEOUT, _from.calculateHash());
}
}
@@ -349,7 +349,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
* @return lease set or null if we should not send the lease set
*/
private LeaseSet getReplyLeaseSet(boolean force) {
LeaseSet newLS = getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetLocally(_from.calculateHash());
LeaseSet newLS = getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())).lookupLeaseSetLocally(_from.calculateHash());
if (newLS == null)
return null; // punt
@@ -423,7 +423,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
private int getNextLease() {
// set in runJob if found locally
if (_leaseSet == null || !_leaseSet.getReceivedAsReply()) {
_leaseSet = getContext().clientNetDb(_from.calculateHash()).lookupLeaseSetLocally(_to.calculateHash());
_leaseSet = getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())).lookupLeaseSetLocally(_to.calculateHash());
if (_leaseSet == null) {
// shouldn't happen
if (_log.shouldLog(Log.WARN))
@@ -587,7 +587,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
int cause;
if (getContext().clientNetDb(_from.calculateHash()).isNegativeCachedForever(_to.calculateHash())) {
if (getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())).isNegativeCachedForever(_to.calculateHash())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Unable to send to " + _toString + " because the sig type is unsupported");
cause = MessageStatusMessage.STATUS_SEND_FAILURE_UNSUPPORTED_ENCRYPTION;
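The expression getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash())) now appears at every lookup site in this class. A hypothetical private helper, not part of this changeset, would keep those call sites readable; the return type is assumed to match what clientNetDb() returns:

// Hypothetical helper, not in the changeset: resolve the sender's sub-netDb once.
private FloodfillNetworkDatabaseFacade senderNetDb() {
return getContext().clientNetDb(getContext().clientManager().getPrimaryDbid(_from.calculateHash()));
}

Each call above would then reduce to, for example, senderNetDb().lookupLeaseSetLocally(toHash).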

FloodfillNetworkDatabaseSegmentor.java
View File

@@ -40,9 +40,9 @@ import net.i2p.util.Log;
* - Multihome NetDB: This is used to stash leaseSets for our own sites when they are
* sent to us by a floodfill, so that we can reply when they are requested back from us
* regardless of our closeness to them in the routing table.
* - Exploratory NetDB: This is used when we want to stash a DatabaseEntry for a key
* during exploration but don't want it to go into the Main NetDB until we do something
* else with it.
* - Exploratory NetDB: This is used when we know we want a client netDb but don't have
* a hash for it, which should never happen. It is primarily here for debugging: if a LeaseSet
* shows up here, it means a client netDb wasn't created before it was needed.
*
* And there are an unlimited number of "Client" netDbs. These sub-netDbs are
* intended to contain only the information required to operate them, and as such
@@ -96,7 +96,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
@Override
protected FloodfillNetworkDatabaseFacade getSubNetDB(Hash id) {
if (id == null)
return getSubNetDB(MAIN_DBID);
return mainNetDB();
return getSubNetDB(id.toBase32());
}
@@ -121,6 +121,24 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
id = "clients_" + id;
}
return _subDBs.get(id);
}
/**
* Create a client netDb for a given client string identifier and return it.
* Will never return the mainNetDB.
*
* @since 0.9.60
*/
public FloodfillNetworkDatabaseFacade createClientNetDB(String id) {
if (id == null || id.isEmpty() || id.equals(MAIN_DBID) || id.equals(MULTIHOME_DBID) || id.equals(EXPLORATORY_DBID))
return clientNetDB();
if (id.endsWith(".i2p")) {
if (!id.startsWith("clients_"))
id = "clients_" + id;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Creating client netDb for " + id);
FloodfillNetworkDatabaseFacade subdb = _subDBs.get(id);
if (subdb == null) {
subdb = new FloodfillNetworkDatabaseFacade(_context, id);
@@ -130,6 +148,43 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
}
return subdb;
}
/**
* Create a client netDb for a given client Hash identifier and return it.
* Will never return the mainNetDB.
*
* @since 0.9.60
*/
public FloodfillNetworkDatabaseFacade createClientNetDB(Hash dbid) {
if (dbid == null)
return clientNetDB();
return createClientNetDB(dbid.toBase32());
}
/**
* Remove a client netDb for a given client string identifier. Will never
* remove the mainNetDB.
*
* @since 0.9.60
*/
public void removeClientNetDB(String id) {
if (id == null || id.isEmpty() || id.equals(MAIN_DBID) || id.equals(MULTIHOME_DBID) || id.equals(EXPLORATORY_DBID))
return;
if (id.endsWith(".i2p")) {
if (!id.startsWith("clients_"))
id = "clients_" + id;
}
_subDBs.remove(id);
}
/**
* Remove a client netDb for a given client Hash identifier. Will never
* remove the mainNetDB.
*
* @since 0.9.60
*/
public void removeClientNetDB(Hash dbid) {
if (dbid == null)
return;
removeClientNetDB(dbid.toBase32());
}
/**
* If we are floodfill, turn it off and tell everybody.
@@ -145,8 +200,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("(dbid: " + subdb._dbid
+ ") Shutting down all remaining sub-netDbs",
new Exception());
+ ") Shutting down all remaining sub-netDbs");
subdb.shutdown();
}
}
@@ -162,8 +216,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("(dbid: " + subdb._dbid
+ ") Called from FNDS, will be combined with all other subDbs",
new Exception());
+ ") Called from FNDS, will be combined with all other subDbs");
rv.addAll(subdb.getKnownRouterData());
}
return rv;
@@ -181,8 +234,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("(dbid: " + subdb._dbid
+ ") Deprecated! Arbitrary selection of this subDb",
new Exception());
+ ") getting all Floodfill peers across all subDbs");
peers.addAll(subdb.getFloodfillPeers());
}
return peers;
@@ -216,8 +268,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("(dbid: " + subdb._dbid
+ ") Deprecated! Arbitrary selection of this subDb",
new Exception());
+ ") lookup lease set locally for " + key.toBase32() + " in all subDbs");
rv = subdb.lookupLeaseSetLocally(key);
if (rv != null) {
return rv;
@@ -260,8 +311,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF
for (FloodfillNetworkDatabaseFacade subdb : getSubNetDBs()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("(dbid: " + subdb._dbid
+ ") Deprecated! Arbitrary selection of this subDb",
new Exception());
+ ") getting all routers across all subDbs");
rv.addAll(subdb.getRouters());
}
return rv;
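Taken together with the hooks added earlier in ClientConnectionRunner and ClientMessageEventListener, the lifecycle this changeset aims for is: create the client's sub-netDb when the client connects, and tear it down with the new remove methods when the client goes away (no remove call sites appear in this diff). A rough sketch, with ctx and destHash as placeholder names:

// Lifecycle sketch only; ctx and destHash are placeholders, not from the changeset.
FloodfillNetworkDatabaseFacade subdb = ctx.netDbSegmentor().createClientNetDB(destHash); // on client connect
// ... the client publishes and looks up LeaseSets via ctx.clientNetDb(destHash) ...
ctx.netDbSegmentor().removeClientNetDB(destHash); // when the client is gone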

KademliaNetworkDatabaseFacade.java
View File

@@ -1440,7 +1440,13 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad
// are any updates
if (_log.shouldLog(Log.INFO))
_log.info("Dropping a lease: " + dbEntry);
_ds.remove(dbEntry, false);
try {
_ds.remove(dbEntry, false);
} catch (UnsupportedOperationException uoe) {
// if this happens it's because we're a TransientDataStore instead,
// so just call remove without the persist option.
_ds.remove(dbEntry);
}
}
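Catching UnsupportedOperationException keeps this method independent of the concrete data store in use. An alternative, sketched here purely as an assumption about the store classes (a persistent implementation such as PersistentDataStore versus the transient one), would branch on the type instead:

// Alternative sketch, not in the changeset: branch on the store type rather than
// catching the exception. Assumes only the persistent store supports the persist flag.
if (_ds instanceof PersistentDataStore)
_ds.remove(dbEntry, false);
else
_ds.remove(dbEntry);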
/** don't use directly - see F.N.D.F. override */

RepublishLeaseSetJob.java
View File

@@ -44,7 +44,7 @@ class RepublishLeaseSetJob extends JobImpl {
try {
if (getContext().clientManager().isLocal(_dest)) {
LeaseSet ls = getContext().clientNetDb(_dest).lookupLeaseSetLocally(_dest);
LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
if (ls != null) {
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
if (_log.shouldLog(Log.WARN))
@@ -103,7 +103,7 @@ class RepublishLeaseSetJob extends JobImpl {
// Don't requeue if there's a newer LS, KNDF will have already done that
LeaseSet ls = null;
if (_dest != null)
ls = getContext().clientNetDb(_dest).lookupLeaseSetLocally(_ls.getHash());
ls = _facade.lookupLeaseSetLocally(_ls.getHash());
else
ls = getContext().mainNetDb().lookupLeaseSetLocally(_ls.getHash());
// ^ _dest should never be null here, right? So maybe instead we return immediately?

SegmentedNetworkDatabaseFacade.java
View File

@@ -99,6 +99,34 @@ public abstract class SegmentedNetworkDatabaseFacade {
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade clientNetDB(Hash dbid);
/**
* Create a client netDb for a given client string identifier and return it.
* Will never return the mainNetDB.
*
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade createClientNetDB(String dbid);
/**
* Create a client netDb for a given client Hash identifier and return it.
* Will never return the mainNetDB.
*
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade createClientNetDB(Hash dbid);
/**
* Remove a client netDb for a given client string identifier. Will never
* remove the mainNetDB.
*
* @since 0.9.60
*/
public abstract void removeClientNetDB(String dbid);
/**
* Remove a client netDb for a given client Hash identifier. Will never
* remove the mainNetDB.
*
* @since 0.9.60
*/
public abstract void removeClientNetDB(Hash dbid);
/**
* Shut down the network database and all subDbs.
*

InboundMessageDistributor.java
View File

@@ -140,7 +140,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
return;
RouterInfo oldri = null;
if (_client != null)
oldri = _context.clientNetDb(_client).lookupRouterInfoLocally(key);
oldri = _context.clientNetDb(_context.clientManager().getPrimaryDbid(_client)).lookupRouterInfoLocally(key);
else
oldri = _context.mainNetDb().lookupRouterInfoLocally(key);
// only update if RI is newer and non-ff
@@ -271,7 +271,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
if (dsm.getEntry().isLeaseSet()) {
if (_log.shouldLog(Log.INFO))
_log.info("[client: " + _clientNickname + "] Saving LS DSM from client tunnel.");
FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.clientNetDb(_client));
FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.clientNetDb(_context.clientManager().getPrimaryDbid(_client)));
Job j = _FDSMH.createJob(msg, null, null);
j.runJob();
if (sz > 0) {
@@ -403,7 +403,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
// ToDo: This should actually have a try and catch.
if (_log.shouldLog(Log.INFO))
_log.info("Store the LS in the correct dbid subDb: " + _client.toBase32());
FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.clientNetDb(_client));
FloodfillDatabaseStoreMessageHandler _FDSMH = new FloodfillDatabaseStoreMessageHandler(_context, _context.clientNetDb(_context.clientManager().getPrimaryDbid(_client)));
Job j = _FDSMH.createJob(data, null, null);
j.runJob();
if (sz > 0) {

AliasedTunnelPool.java
View File

@@ -2,6 +2,7 @@ package net.i2p.router.tunnel.pool;
import java.util.List;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
@@ -115,7 +116,7 @@ public class AliasedTunnelPool extends TunnelPool {
@Override
protected LeaseSet locked_buildNewLeaseSet() {
LeaseSet ls = _context.clientNetDb(_aliasOf.getSettings().getDestination()).lookupLeaseSetLocally(_aliasOf.getSettings().getDestination());
LeaseSet ls = _context.clientNetDb(_context.clientManager().getPrimaryDbid(_aliasOf.getSettings().getDestination())).lookupLeaseSetLocally(_aliasOf.getSettings().getDestination());
if (ls == null)
return null;
// copy everything so it isn't corrupted