Compare commits

...

23 Commits

SHA1 Message Date
bb3a58c658 Router: remove unnecessary warning 2023-11-15 12:42:43 -05:00
6b2994b59e Router: remove unnecessary comment 2023-11-15 12:29:37 -05:00
0fe36c8a70 Router: simplify store-blocking logic for subDbs in HFDSMJ 2023-11-15 12:27:45 -05:00
6b06f28474 Router: fix comments on HDLMJ 125-131 2023-11-15 10:24:38 -05:00
97afc8204d Router: Address code review issues for #147
Revise comments, remove redundant logging, fix client netDb blockStore test, always and only reply to RAP leaseSets out of the main DB
2023-11-14 16:50:55 -05:00
2c727d09e2 Router: revise TODO comment in HFDSMJ 2023-11-13 13:10:03 -05:00
fa10194612 Router: update comments in HDLMJ 2023-11-13 12:32:49 -05:00
d845135484 Router: update Role #2 behavior to document exceptions to the rule 2023-11-13 12:15:17 -05:00
64808cb4fe Router: get rid of BlockStore for the main netDb 2023-11-13 12:00:14 -05:00
33d19a128c Router: Revise comment in HFDSMJ to reflect the reality 2023-11-13 11:54:43 -05:00
e55721fe2c Router: get rid of unused local handling 2023-11-08 14:45:51 -05:00
1f35d9f881 Merge branch 'master' of i2pgit.org:i2p-hackers/i2p.i2p into i2p.i2p.2.4.0-simplify-low-level-netdb-handling 2023-11-07 14:42:02 -05:00
469c4ee846 Router: attempt to block local stores when they are directed to the originating client subDb 2023-11-06 19:20:22 -05:00
109277143e Router: remove local-specific handling in HDLMJ 2023-11-06 19:03:50 -05:00
7bbe38504f Router: remove isLocal handling from HandleDatabaseLookupMessageJob 2023-11-06 18:06:29 -05:00
253db3b9be Router: remove isLocal handling from HandleFloodfillDatabaseLookupMessageJob 2023-11-06 17:45:54 -05:00
0fdbf15f58 Router: remove isLocal handling from HandleFloodfillDatabaseStoreMessageJob 2023-11-06 17:39:12 -05:00
d35b4e0f1e Router: InboundMessageDistributor: Remove commented-out useless check 2023-11-06 17:13:50 -05:00
36d94733e2 Router: RefreshRoutersJob: make EXPIRE private 2023-11-06 16:54:54 -05:00
4b40314a62 Router: CapacityCalculator: delete commented out section for G cap handling 2023-11-06 16:54:06 -05:00
16d4d0625c Router: FloodfillNetworkDatabaseFacade: update TODO, if fromLocalDest is null and isClientDb is true in a call to FNDF.search throw an IAE, clients should not use exploratory tunnels 2023-11-06 15:55:49 -05:00
b3d0b91db0 Router: FloodfillNetworkDatabaseFacade: remove commented-out chanceOfFloodingOurOwn call 2023-11-06 15:52:58 -05:00
336a01752b Router: FloodfillNetworkDatabaseFacade: remove commented-out chanceOfFloodingOurOwn 2023-11-06 13:32:32 -05:00
2 changed files with 37 additions and 119 deletions
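
Commit 16d4d0625c above describes a guard added to FloodfillNetworkDatabaseFacade.search() that is not part of the two files shown below. As a rough sketch of the described behavior (the names fromLocalDest and isClientDb come from the commit message; the placement and surrounding signature are assumptions, not the committed code):

    // Hypothetical sketch of the guard described in 16d4d0625c: a client netDb
    // must be handed the client's destination, because client lookups must not
    // fall back to exploratory tunnels.
    if (isClientDb() && fromLocalDest == null)
        throw new IllegalArgumentException(
                "Client netDb search requires fromLocalDest; clients should not use exploratory tunnels");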

HandleDatabaseLookupMessageJob.java

@@ -115,66 +115,24 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
if (DatabaseEntry.isLeaseSet(type) &&
(lookupType == DatabaseLookupMessage.Type.ANY || lookupType == DatabaseLookupMessage.Type.LS)) {
LeaseSet ls = (LeaseSet) dbe;
// We have to be very careful here to decide whether or not to send out the leaseSet,
// to avoid anonymity vulnerabilities.
// As this is complex, lots of comments follow...
boolean isLocal = getContext().clientManager().isLocal(ls.getHash());
boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(searchKey);
// Only answer a request for a LeaseSet if it has been published
// to us, or, if it's local, if we would have published to ourselves
// Answer any request for a LeaseSet if it has been published to us.
// answerAllQueries: We are floodfill
// getReceivedAsPublished:
// false for local
// false for received over a tunnel
// false for received in response to our lookups
// true for received in a DatabaseStoreMessage unsolicited
// false for received over a client tunnel (if associated with a client, goes to client subDb)
// true for received in a DatabaseStoreMessage unsolicited (goes to main Db)
if (ls.getReceivedAsPublished()) {
// Answer anything that was stored to us directly
// (i.e. "received as published" - not the result of a query, or received
// over a client tunnel).
// This is probably because we are floodfill, but also perhaps we used to be floodfill,
// so we don't check the answerAllQueries() flag.
// Local leasesets are not handled here
//* Answer anything that was stored to us directly.
//(i.e. "received as published" - not the result of a query).
//* LeaseSets received over a client tunnel will be routed into subDbs.
// subDbs are responsible for publishing their "own" client LeaseSets.
//* The "main" netDb can safely store its own copies of a LeaseSet
// belonging to a Local client, when it is published back to it. Therefore,
// they do not require special handling and are handled here.
if (_log.shouldLog(Log.INFO))
_log.info("We have the published LS " + searchKey + ", answering query");
getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1);
sendData(searchKey, ls, fromKey, toTunnel);
} else if (shouldPublishLocal && answerAllQueries()) {
// We are floodfill, and this is our local leaseset, and we publish it.
// Only send it out if it is in our estimated keyspace.
// For this, we do NOT use their dontInclude list as it can't be trusted
// (i.e. it could mess up the closeness calculation)
Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
CLOSENESS_THRESHOLD, null);
if (weAreClosest(closestHashes)) {
// It's in our keyspace, so give it to them
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
sendData(searchKey, ls, fromKey, toTunnel);
} else {
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + searchKey + ", NOT answering query, out of our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
lookupType != DatabaseLookupMessage.Type.LS) {
@@ -251,10 +209,6 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
MAX_ROUTERS_RETURNED,
dontInclude);
}
private boolean weAreClosest(Set<Hash> routerHashSet) {
return routerHashSet.contains(_us);
}
private void sendData(Hash key, DatabaseEntry data, Hash toPeer, TunnelId replyTunnel) {
if (!key.equals(data.getHash())) {
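
After this change, the LeaseSet branch of the lookup handler shown in the first hunk above reduces to a single question: was the entry received as published, i.e. stored to us directly into the main netDb? A condensed sketch of the resulting control flow, using only the fields and helpers visible in the hunk (logging and stat counters omitted; not the verbatim final code):

    if (ls.getReceivedAsPublished()) {
        // Stored to us directly ("received as published"): answer out of the main netDb.
        sendData(searchKey, ls, fromKey, toTunnel);
    } else {
        // Result of our own lookup, or routed into a client subDb:
        // pretend we don't have it and hand back the closest routers instead.
        Set<Hash> routerHashSet = getNearestRouters(lookupType);
        sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
    }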

HandleFloodfillDatabaseStoreMessageJob.java

@@ -27,6 +27,7 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
@@ -75,7 +76,6 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// set if invalid store but not his fault
boolean dontBlamePeer = false;
boolean wasNew = false;
boolean blockStore = false;
RouterInfo prevNetDb = null;
Hash key = _message.getKey();
DatabaseEntry entry = _message.getEntry();
@@ -91,36 +91,28 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// local LeaseSets has changed substantially, based on the role
// being assumed.
// Role #1) The 'floodfill' netDb when the router is a FloodFill
// In this case, the router would actually de-anonymize
// the clients it is hosting if it refuses LeaseSets for
// these clients.
// The LS will be checked to make sure it arrived directly,
// and handled as a normal LS.
// Role #2) The 'floodfill' netDb when the router is *NOT* an I2P
// network Floodfill.
// In this case, the 'floodfill' netDb only stores RouterInfo.
// There is no use case for the 'floodfill' netDb to store any
// LeaseSets when the router is not a FloodFill.
// Role #3) Client netDb should only receive LeaseSets from their
// tunnels. And clients will only publish their LeaseSet
// out their client tunnel.
// In this role, the only LeaseSet that should be rejected
// is its own LeaseSet.
// In this case, the 'floodfill' netDb primarily stores RouterInfos.
// However, there are a number of normal cases where it might contain
// one or more LeaseSets:
// 1. We used to be a floodfill but aren't anymore
// 2. We performed a lookup without an active session locally (it won't be RAP)
// Role #3) Client netDbs will only receive LeaseSets from their client
// tunnels, and clients will only publish their LeaseSet out
// their client tunnel.
// In this role, the only LeaseSet store that should be rejected
// is the subDb's client's own LeaseSet.
//
// ToDo: Currently, the 'floodfill' netDb will be excluded
// Currently, the 'floodfill' netDb will be excluded
// from directly receiving a client LeaseSet, due to the
// way FloodFill routers are selected
// when flooding a LS.
// But even if the host router does not directly receive the
// LeaseSets of the clients it hosts, those LeaseSets will
// usually be flooded back to it.
// Is this enough, or do we need to pierce the segmentation
// under certain conditions?
//
// ToDo: What considerations are needed for multihoming?
// with multihoming, it's really important to prevent the
// client netDb from storing the other guy's LeaseSet.
// It will confuse us badly.
LeaseSet ls = (LeaseSet) entry;
// If this was received as a response to a query,
@@ -133,30 +125,18 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// See ../HDLMJ for more info
if (!ls.getReceivedAsReply())
ls.setReceivedAsPublished();
if (_facade.isClientDb())
blockStore = false;
else if (getContext().clientManager().isLocal(key))
// Non-client context
if (_facade.floodfillEnabled() && (_fromHash != null))
blockStore = false;
else
// FloodFill disabled, but in the 'floodfill' netDb context.
// We should never get here, the 'floodfill' netDb doesn't
// store LS when FloodFill is disabled.
blockStore = true;
else
blockStore = false;
if (blockStore) {
getContext().statManager().addRateData("netDb.storeLocalLeaseSetAttempt", 1, 0);
// If we're using subdbs, store the leaseSet in the multihome DB.
// otherwise, throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
//if (getContext().netDbSegmentor().useSubDbs())
//getContext().multihomeNetDb().store(key, ls);
//else
if (_facade.isClientDb()) {
// This is where we deal with what happens if a client subDB tries to store
// a leaseSet which it is the owner/publisher of.
// Look up a ls hash in the netDbSegmentor, and compare it to the _facade that we have.
// If they are equal, reject the store.
if (getContext().netDbSegmentor().clientNetDB(ls.getHash()).equals(_facade)) {
getContext().statManager().addRateData("netDb.storeLocalLeaseSetToLocalClient", 1, 0);
dontBlamePeer = true;
throw new IllegalArgumentException("(dbid: " + _facade._dbid
+ ") Peer attempted to store local leaseSet: "
+ key.toBase32());
+ ") Peer attempted to store local leaseSet: "
+ key.toBase32() + " to client subDB " + _facade + " which is its own publisher");
}
}
//boolean oldrar = ls.getReceivedAsReply();
//boolean oldrap = ls.getReceivedAsPublished();
@@ -202,16 +182,6 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
invalidMessage = uce.getMessage();
dontBlamePeer = true;
} catch (IllegalArgumentException iae) {
// This is somewhat normal behavior in client netDb context,
// and safely handled.
// This is more worrisome in the floodfill netDb context.
// It is not expected to happen since we check if it was sent directly.
if (_facade.isClientDb())
if (_log.shouldInfo())
_log.info("LS Store IAE (safely handled): ", iae);
else
if (_log.shouldError())
_log.error("LS Store IAE (unexpected): ", iae);
invalidMessage = iae.getMessage();
}
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
@@ -281,11 +251,11 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
dontBlamePeer = true;
throw new IllegalArgumentException("Peer attempted to store our RouterInfo");
}
// If we're in the client netDb context, log a warning since
// it should be rare that RI DSM are handled in the client context.
// If we're in the client netDb context, log a warning since this is not expected.
// This is probably impossible but log it if we ever see it so it can be investigated.
if (_facade.isClientDb() && _log.shouldWarn())
_log.warn("[dbid: " + _facade._dbid
+ "]: Handling RI dbStore in client netDb context of router " + key.toBase64());
+ "]: Handling RI dbStore in client netDb context of router " + key.toBase64());
boolean shouldStore = true;
if (ri.getReceivedAsPublished()) {
// these are often just dup stores from concurrent lookups
@@ -395,7 +365,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
+ ") and new ("
+ ri.getIdentity().getSigningPublicKey()
+ ") signing public keys do not match!");
}
}
}
}
if (shouldStore) {
@@ -638,12 +608,6 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
out2 = tgm2;
}
}
if (_facade.isClientDb()) {
// We shouldn't be reaching this point given the above conditional.
_log.error("Error! SendMessageDirectJob (isEstab) attempted in Client netDb ("
+ _facade._dbid + ")! Message: " + out1);
return;
}
Job send = new SendMessageDirectJob(getContext(), out1, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY, _msgIDBloomXor);
send.runJob();
if (msg2 != null) {
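
Taken together, the store-blocking simplification (commits 469c4ee846 and 0fe36c8a70) leaves the LeaseSet store path with one refusal case: a client subDb being asked to store its own client's LeaseSet, rejected with an IllegalArgumentException after marking the peer blameless so the ack below is still sent. A condensed sketch built from the calls shown in the hunks above (not the verbatim committed code):

    if (!ls.getReceivedAsReply())
        ls.setReceivedAsPublished();   // direct store, not a reply to our own lookup
    if (_facade.isClientDb()
            && getContext().netDbSegmentor().clientNetDB(ls.getHash()).equals(_facade)) {
        // The only store a client subDb refuses: its own client's LeaseSet.
        getContext().statManager().addRateData("netDb.storeLocalLeaseSetToLocalClient", 1, 0);
        dontBlamePeer = true;          // don't penalize the sender; we still ack
        throw new IllegalArgumentException("Client subDb " + _facade
                + " asked to store its own client's leaseSet " + key.toBase32());
    }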