forked from I2P_Developers/i2p.i2p

Compare commits: i2p.i2p.2. ... i2p.i2p.ne (3 commits)
| Author | SHA1 | Date |
| --- | --- | --- |
| | b7202e47fc | |
| | e2e6cb583d | |
| | 76fd9abd73 | |
@@ -1335,31 +1335,9 @@ public abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2

```java
        _availabilityNotifier.stopNotifying();
        closeSocket();
        _subsessionMap.clear();
        clearOldNetDB();
        if (_sessionListener != null) _sessionListener.disconnected(this);
    }

    private void clearOldNetDB() {
        Destination myDest = getMyDestination();
        if (myDest != null) {
            String base32 = myDest.toBase32();
            if (base32 != null) {
                String dbid = "clients_" + base32;
                // get the netDb directory
                File netDbDir = new File(_context.getConfigDir(), "netDb");
                File subNetDbDir = new File(netDbDir, dbid);
                if (subNetDbDir.exists()) {
                    subNetDbDir.delete();
                }
                File baseNetDbDir = new File(_context.getConfigDir(), "netDb");
                File baseSubNetDbDir = new File(baseNetDbDir, dbid);
                if (baseSubNetDbDir.exists()) {
                    baseSubNetDbDir.delete();
                }
            }
        }
    }

    /**
     * Close the socket carefully.
     */
```
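One caveat with the cleanup above: `java.io.File.delete()` only removes files and empty directories, so if the per-client `netDb/clients_<base32>` directory still contains persisted entries, both `delete()` calls return false and the directory stays behind. A minimal recursive helper along the lines below would handle that; the name `deleteRecursively` is hypothetical and not part of this changeset.

```java
// Hypothetical helper, not in this diff: File.delete() cannot remove a
// non-empty directory, so delete the children first, then the directory.
// Assumes java.io.File is already imported, as in I2PSessionImpl.
private static boolean deleteRecursively(File dir) {
    File[] children = dir.listFiles();
    if (children != null) {
        for (File child : children)
            deleteRecursively(child);
    }
    return dir.delete();
}
```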
@@ -0,0 +1,28 @@

```java
package net.i2p.router.networkdb;

import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;

public class CleanupNetDbJob extends JobImpl {
    private final RouterContext ctx;
    private static final long RERUN_DELAY_MS = 1 * 60 * 1000;

    public CleanupNetDbJob(RouterContext context) {
        super(context);
        ctx = context;
    }

    @Override
    public String getName() {
        return "CleanupNetDbJob";
    }

    @Override
    public void runJob() {
        FloodfillNetworkDatabaseSegmentor fnds = (FloodfillNetworkDatabaseSegmentor) ctx.netDb();
        fnds.removeDeadSubDbs(ctx.clientManager().listClients());
        requeue(RERUN_DELAY_MS);
    }
}
```
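The new job casts `ctx.netDb()` directly to `FloodfillNetworkDatabaseSegmentor`, so it assumes the router is always running the segmented netDb. A defensive variant of `runJob()` is sketched below as an illustration only, not as part of the committed file; it assumes an added import of `net.i2p.router.NetworkDatabaseFacade`, the declared return type of `ctx.netDb()`.

```java
// Sketch only: guard the cast in case netDb() is not the segmented facade.
@Override
public void runJob() {
    NetworkDatabaseFacade netDb = ctx.netDb();
    if (netDb instanceof FloodfillNetworkDatabaseSegmentor) {
        FloodfillNetworkDatabaseSegmentor fnds = (FloodfillNetworkDatabaseSegmentor) netDb;
        fnds.removeDeadSubDbs(ctx.clientManager().listClients());
    }
    // re-queue regardless, so the cleanup keeps running once a minute
    requeue(RERUN_DELAY_MS);
}
```

Because the job re-queues itself with `requeue(RERUN_DELAY_MS)`, the single `addJob()` call in the segmentor's constructor (further down in this diff) is enough to keep it running.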
@@ -29,6 +29,7 @@ import net.i2p.util.ConcurrentHashSet;

```java
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.util.SystemVersion;
//import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;

/**
 * The network database
```
@@ -19,6 +19,7 @@ import net.i2p.data.TunnelId;

```java
import net.i2p.data.router.RouterInfo;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.CleanupNetDbJob;
import net.i2p.router.networkdb.reseed.ReseedChecker;
import net.i2p.util.Log;
```
@@ -36,6 +37,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF

```java
        _context = context;
        FloodfillNetworkDatabaseFacade subdb = new FloodfillNetworkDatabaseFacade(_context, MAIN_DBID);
        _subDBs.put(MAIN_DBID, subdb);
        _context.jobQueue().addJob(new CleanupNetDbJob(_context));
    }

    /*
```
@@ -112,6 +114,25 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseF

```java
        }
    }

    private synchronized void remove(String dbid) {
        if (dbid != null)
            if (dbid.endsWith(".i2p"))
                dbid = "clients_" + dbid;
            else if (dbid.equals(""))
                dbid = MAIN_DBID;
        GetSubNetDB(dbid).shutdown();
        _subDBs.remove(dbid);
    }

    public synchronized void removeDeadSubDbs(Set<Destination> clientDests) {
        for (String dbid : _subDBs.keySet()) {
            for (Destination db : clientDests) {
                if (!db.toBase32().equals(dbid))
                    remove(dbid);
            }
        }
    }

    /**
     * This maybe could be shorter than
     * RepublishLeaseSetJob.REPUBLISH_LEASESET_TIMEOUT,
```
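As committed, `removeDeadSubDbs()` calls `remove(dbid)` as soon as any single client destination fails to match that dbid, and it removes entries from `_subDBs` while iterating over its live key set. A variant that drops a sub-DB only when no client destination corresponds to it is sketched below; it assumes the `clients_<base32>` id form used in the `I2PSessionImpl` change above and an import of `java.util.HashSet`, and should be read as an illustration rather than the committed method.

```java
// Sketch, not the committed code: collect the dbids that still have a
// live client, then remove only sub-DBs outside that set. Iterates over
// a copy of the key set so remove() can safely mutate _subDBs.
public synchronized void removeDeadSubDbs(Set<Destination> clientDests) {
    Set<String> live = new HashSet<String>();
    for (Destination d : clientDests)
        live.add("clients_" + d.toBase32());   // assumed dbid form
    for (String dbid : new HashSet<String>(_subDBs.keySet())) {
        if (!dbid.equals(MAIN_DBID) && !live.contains(dbid))
            remove(dbid);
    }
}
```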
@@ -51,6 +51,8 @@ import net.i2p.router.RouterContext;

```java
import net.i2p.router.crypto.FamilyKeyCrypto;
import net.i2p.router.networkdb.PublishLocalRouterInfoJob;
import net.i2p.router.networkdb.reseed.ReseedChecker;
import net.i2p.router.networkdb.kademlia.PersistentDataStore;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseSegmentor;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
```
@@ -313,7 +315,9 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad

```java
                BUCKET_SIZE, KAD_B, new RejectTrimmer<Hash>());
        _dbDir = getDbDir();
        try {
            _ds = new PersistentDataStore(_context, _dbDir, this);
            if (_dbid == null || _dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID) || _dbid.isEmpty()) {
                _ds = new PersistentDataStore(_context, "", this);
            }
        } catch (IOException ioe) {
            throw new RuntimeException("Unable to initialize netdb storage", ioe);
        }
```
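When `_dbid` names the main database, the hunk above first builds a `PersistentDataStore` on `_dbDir` and then immediately replaces it with one rooted at `""`, discarding the first store. A single construction that chooses the directory up front would avoid that; the sketch below keeps the committed assumption that an empty string maps to the default netDb directory.

```java
// Sketch only: pick the directory once, then construct a single store
// inside the existing try/catch (PersistentDataStore may throw IOException).
String storeDir = (_dbid == null || _dbid.isEmpty()
        || _dbid.equals(FloodfillNetworkDatabaseSegmentor.MAIN_DBID)) ? "" : _dbDir;
_ds = new PersistentDataStore(_context, storeDir, this);
```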