2005-10-19 jrandom
    * Bugfix for the auto-update code to handle different usage patterns
    * Decreased the addressbook recheck frequency to once every 12 hours
      instead of hourly.
    * Handle dynamically changing the HMAC size (again, unless your nym is
      toad or jrandom, ignore this ;)
    * Cleaned up some synchronization/locking code
@@ -143,7 +143,7 @@ public class Daemon {
         defaultSettings.put("subscriptions", "subscriptions.txt");
         defaultSettings.put("etags", "etags");
         defaultSettings.put("last_modified", "last_modified");
-        defaultSettings.put("update_delay", "1");
+        defaultSettings.put("update_delay", "12");

         File homeFile = new File(home);
         if (!homeFile.exists()) {
@@ -188,4 +188,4 @@ public class Daemon {
             _instance.notifyAll();
         }
     }
 }
 }
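For context, the addressbook's update_delay is given in hours, so the new default of "12" moves the recheck from hourly to twice a day. Below is a minimal, purely illustrative sketch of how such an hour-based setting can drive a recheck loop; the real addressbook Daemon loop is not part of these hunks, and all names here are hypothetical.

// Illustrative only -- not the addressbook Daemon itself.
// Shows an "update_delay" measured in hours driving a periodic recheck.
import java.util.HashMap;
import java.util.Map;

public class RecheckSketch {
    public static void main(String[] args) throws InterruptedException {
        Map settings = new HashMap();
        settings.put("update_delay", "12");              // hours; the old default was "1"
        long hours = Long.parseLong((String)settings.get("update_delay"));
        if (hours < 1) hours = 1;                        // never recheck more often than hourly
        long delayMs = hours * 60L * 60L * 1000L;
        while (true) {
            System.out.println("rechecking subscriptions...");
            Thread.sleep(delayMs);                       // 12 hours by default
        }
    }
}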
@@ -31,7 +31,9 @@ public class UpdateHandler {

     private static final String SIGNED_UPDATE_FILE = "i2pupdate.sud";

-    public UpdateHandler() {}
+    public UpdateHandler() {
+        this(ContextHelper.getContext(null));
+    }
     public UpdateHandler(RouterContext ctx) {
         _context = ctx;
         _log = ctx.logManager().getLog(UpdateHandler.class);
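The auto-update fix above replaces the empty no-argument constructor with one that looks up a default RouterContext and delegates to the existing constructor, so callers that construct UpdateHandler without a context still get a fully initialized instance. A self-contained sketch of that delegation pattern follows; FakeContext and lookup() are stand-ins, since ContextHelper and RouterContext are not shown in this diff.

// Sketch of the "no-arg constructor delegates to a default context" pattern.
// FakeContext stands in for RouterContext; lookup() for ContextHelper.getContext(null).
public class HandlerSketch {
    static class FakeContext {
        private static final FakeContext INSTANCE = new FakeContext();
        static FakeContext lookup() { return INSTANCE; }   // stand-in for the real context lookup
        String name() { return "default-context"; }
    }

    private final FakeContext _context;

    public HandlerSketch() {
        this(FakeContext.lookup());       // same shape as this(ContextHelper.getContext(null))
    }

    public HandlerSketch(FakeContext ctx) {
        _context = ctx;                   // all real initialization lives in one place
    }

    public static void main(String[] args) {
        System.out.println(new HandlerSketch()._context.name());
    }
}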
@@ -128,10 +128,13 @@ public class HMACSHA256Generator {
             if (_available.size() > 0)
                 return (HMac)_available.remove(0);
         }
+        // the HMAC is hardcoded to use SHA256 digest size
+        // for backwards compatability. next time we have a backwards
+        // incompatible change, we should update this by removing ", 32"
         if (_useMD5)
-            return new HMac(new MD5Digest());
+            return new HMac(new MD5Digest(), 32);
         else
-            return new HMac(new SHA256Digest());
+            return new HMac(new SHA256Digest(), 32);
     }
     private void release(HMac mac) {
         synchronized (_available) {
@@ -115,27 +115,29 @@ public class BufferedStatLog implements StatLog {
         int writeStart = -1;
         int writeEnd = -1;
         while (true) {
-            synchronized (_events) {
-                if (_eventNext > _lastWrite) {
-                    if (_eventNext - _lastWrite < _flushFrequency)
-                        try { _events.wait(30*1000); } catch (InterruptedException ie) {}
-                } else {
-                    if (_events.length - 1 - _lastWrite + _eventNext < _flushFrequency)
-                        try { _events.wait(30*1000); } catch (InterruptedException ie) {}
+            try {
+                synchronized (_events) {
+                    if (_eventNext > _lastWrite) {
+                        if (_eventNext - _lastWrite < _flushFrequency)
+                            _events.wait(30*1000);
+                    } else {
+                        if (_events.length - 1 - _lastWrite + _eventNext < _flushFrequency)
+                            _events.wait(30*1000);
                     }
-                writeStart = (_lastWrite + 1) % _events.length;
-                writeEnd = _eventNext;
-                _lastWrite = (writeEnd == 0 ? _events.length-1 : writeEnd - 1);
-            }
+                    writeStart = (_lastWrite + 1) % _events.length;
+                    writeEnd = _eventNext;
+                    _lastWrite = (writeEnd == 0 ? _events.length-1 : writeEnd - 1);
+                }
-            if (writeStart != writeEnd) {
-                try {
-                    if (_log.shouldLog(Log.DEBUG))
-                        _log.debug("writing " + writeStart +"->"+ writeEnd);
-                    writeEvents(writeStart, writeEnd);
-                } catch (Exception e) {
-                    _log.error("error writing " + writeStart +"->"+ writeEnd, e);
+                if (writeStart != writeEnd) {
+                    try {
+                        if (_log.shouldLog(Log.DEBUG))
+                            _log.debug("writing " + writeStart +"->"+ writeEnd);
+                        writeEvents(writeStart, writeEnd);
+                    } catch (Exception e) {
+                        _log.error("error writing " + writeStart +"->"+ writeEnd, e);
                     }
                 }
             }
+            } catch (InterruptedException ie) {}
         }
     }
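The BufferedStatLog change above (and the JobQueue changes further down) are the same synchronization cleanup: instead of wrapping every wait() call in its own inline try/catch, the whole body of a loop iteration sits inside one try, and an InterruptedException simply falls through to the next iteration. A generic, self-contained sketch of that pattern follows; it deliberately uses throwaway names rather than the I2P classes.

// Generic sketch of the locking cleanup: one try/catch per loop iteration
// instead of an inline try/catch around every wait() call.
import java.util.ArrayList;
import java.util.List;

public class WaitLoopSketch implements Runnable {
    private final List _queue = new ArrayList();   // guarded by its own monitor
    private volatile boolean _alive = true;

    public void run() {
        while (_alive) {
            try {
                Object item = null;
                synchronized (_queue) {
                    if (_queue.isEmpty())
                        _queue.wait(30 * 1000);     // bare wait; no inline catch needed
                    if (!_queue.isEmpty())
                        item = _queue.remove(0);
                }
                if (item != null)
                    System.out.println("processing " + item);
            } catch (InterruptedException ie) {
                // interrupted mid-wait: just loop around and re-check the state
            }
        }
    }

    public void offer(Object item) {
        synchronized (_queue) {
            _queue.add(item);
            _queue.notifyAll();
        }
    }
}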
@@ -58,9 +58,14 @@ implements Mac

     public HMac(
         Digest digest)
     {
+        this(digest, digest.getDigestSize());
+    }
+    public HMac(
+        Digest digest, int sz)
+    {
         this.digest = digest;
-        digestSize = digest.getDigestSize();
+        this.digestSize = sz;
     }

     public String getAlgorithmName()
@@ -141,7 +146,7 @@ implements Mac
         byte[] out,
         int outOff)
     {
-        byte[] tmp = acquireTmp();
+        byte[] tmp = acquireTmp(digestSize);
         //byte[] tmp = new byte[digestSize];
         digest.doFinal(tmp, 0);

@@ -156,23 +161,27 @@ implements Mac
         return len;
     }

-    private static ArrayList _tmpBuf = new ArrayList();
-    private static byte[] acquireTmp() {
+    /**
+     * list of buffers - index 0 is the cache for 32 byte arrays, while index 1 is the cache for 16 byte arrays
+     */
+    private static ArrayList _tmpBuf[] = new ArrayList[] { new ArrayList(), new ArrayList() };
+    private static byte[] acquireTmp(int sz) {
         byte rv[] = null;
-        synchronized (_tmpBuf) {
-            if (_tmpBuf.size() > 0)
-                rv = (byte[])_tmpBuf.remove(0);
+        synchronized (_tmpBuf[sz == 32 ? 0 : 1]) {
+            if (_tmpBuf[sz == 32 ? 0 : 1].size() > 0)
+                rv = (byte[])_tmpBuf[sz == 32 ? 0 : 1].remove(0);
         }
         if (rv != null)
             Arrays.fill(rv, (byte)0x0);
         else
-            rv = new byte[32]; // hard coded against SHA256 (should be digestSize)
+            rv = new byte[sz];
         return rv;
     }
     private static void releaseTmp(byte buf[]) {
-        synchronized (_tmpBuf) {
-            if (_tmpBuf.size() < 100)
-                _tmpBuf.add((Object)buf);
+        if (buf == null) return;
+        synchronized (_tmpBuf[buf.length == 32 ? 0 : 1]) {
+            if (_tmpBuf[buf.length == 32 ? 0 : 1].size() < 100)
+                _tmpBuf[buf.length == 32 ? 0 : 1].add((Object)buf);
         }
     }

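With two MAC sizes now possible, the temporary-buffer cache above is split into one free list per size, so a 16-byte request never receives a 32-byte array and vice versa. The following standalone sketch shows the same two-bucket, size-keyed pool in isolation; it mirrors the diff's "sz == 32 ? 0 : 1" indexing but is not the I2P class itself.

// Sketch of the size-keyed buffer pool used by acquireTmp()/releaseTmp():
// one free list per supported size, so callers always get an exact-length array.
import java.util.ArrayList;
import java.util.Arrays;

public class BufferPoolSketch {
    private static final int MAX_CACHED = 100;
    // index 0 caches 32-byte arrays, index 1 caches 16-byte arrays
    private static final ArrayList[] _pools = new ArrayList[] { new ArrayList(), new ArrayList() };

    private static int bucket(int sz) { return (sz == 32) ? 0 : 1; }

    public static byte[] acquire(int sz) {
        byte[] rv = null;
        ArrayList pool = _pools[bucket(sz)];
        synchronized (pool) {
            if (pool.size() > 0)
                rv = (byte[])pool.remove(0);
        }
        if (rv != null)
            Arrays.fill(rv, (byte)0x0);   // hand back a zeroed buffer
        else
            rv = new byte[sz];
        return rv;
    }

    public static void release(byte[] buf) {
        if (buf == null) return;
        ArrayList pool = _pools[bucket(buf.length)];
        synchronized (pool) {
            if (pool.size() < MAX_CACHED)
                pool.add(buf);
        }
    }
}

As in the real code, only the 32- and 16-byte cases are distinguished; any other size falls into the second bucket.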
history.txt
@@ -1,4 +1,12 @@
-$Id: history.txt,v 1.299 2005/10/17 19:39:46 jrandom Exp $
+$Id: history.txt,v 1.300 2005/10/17 22:14:01 dust Exp $
+
+2005-10-19 jrandom
+    * Bugfix for the auto-update code to handle different usage patterns
+    * Decreased the addressbook recheck frequency to once every 12 hours
+      instead of hourly.
+    * Handle dynamically changing the HMAC size (again, unless your nym is
+      toad or jrandom, ignore this ;)
+    * Cleaned up some synchronization/locking code

 2005-10-17 dust
     * Exchange the remaining URL with EepGet in Sucker.
@@ -1,3 +1,4 @@
 #!/bin/sh
-export I2P=~i2p/i2p
+#export I2P=~i2p/i2p
+export I2P=.
 java -cp $I2P/lib/i2p.jar net.i2p.util.EepGet $*
@@ -135,6 +135,7 @@ public class JobQueue {

         long numReady = 0;
         boolean alreadyExists = false;
+        boolean dropped = false;
         synchronized (_jobLock) {
             if (_readyJobs.contains(job))
                 alreadyExists = true;
@@ -144,34 +145,33 @@ public class JobQueue {
                 alreadyExists = true;
         }

-        _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
         if (shouldDrop(job, numReady)) {
-            if (_log.shouldLog(Log.WARN))
-                _log.warn("Dropping job due to overload!  # ready jobs: "
-                          + numReady + ": job = " + job);
             job.dropped();
-            _context.statManager().addRateData("jobQueue.droppedJobs", 1, 1);
-            _jobLock.notifyAll();
-            return;
-        }

-        if (!alreadyExists) {
-            if (job.getTiming().getStartAfter() <= _context.clock().now()) {
-                // don't skew us - its 'start after' its been queued, or later
-                job.getTiming().setStartAfter(_context.clock().now());
-                if (job instanceof JobImpl)
-                    ((JobImpl)job).madeReady();
-                _readyJobs.add(job);
-                _jobLock.notifyAll();
-            } else {
-                _timedJobs.add(job);
-                _jobLock.notifyAll();
-            }
+            dropped = true;
         } else {
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("Not adding already enqueued job " + job.getName());
+            if (!alreadyExists) {
+                if (job.getTiming().getStartAfter() <= _context.clock().now()) {
+                    // don't skew us - its 'start after' its been queued, or later
+                    job.getTiming().setStartAfter(_context.clock().now());
+                    if (job instanceof JobImpl)
+                        ((JobImpl)job).madeReady();
+                    _readyJobs.add(job);
+                } else {
+                    _timedJobs.add(job);
+                }
+            }
         }
+        _jobLock.notifyAll();
     }

+        _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
+        if (dropped) {
+            _context.statManager().addRateData("jobQueue.droppedJobs", 1, 1);
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Dropping job due to overload!  # ready jobs: "
+                          + numReady + ": job = " + job);
+        }

         return;
     }
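The addJob() rework above is the same locking cleanup in another shape: the drop-or-enqueue decision is taken while holding _jobLock and recorded in the dropped flag, while the stat updates and the warning log run only after the lock is released. The reduced sketch below shows that "decide under the lock, report outside the lock" shape with generic names; it is not the real JobQueue.

// Sketch of "decide under the lock, report outside the lock":
// the monitor protects only the queue mutation; logging and stats happen afterwards.
import java.util.ArrayList;
import java.util.List;

public class EnqueueSketch {
    private final Object _lock = new Object();
    private final List _ready = new ArrayList();
    private static final int MAX_READY = 1000;

    public void add(Object job) {
        boolean dropped = false;
        int numReady;
        synchronized (_lock) {
            numReady = _ready.size();
            if (numReady > MAX_READY) {
                dropped = true;            // just record the decision here
            } else {
                _ready.add(job);
                _lock.notifyAll();
            }
        }
        // slow work (stats, logging) stays outside the synchronized block
        if (dropped)
            System.err.println("dropping job, " + numReady + " already ready: " + job);
    }
}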
@@ -329,13 +329,15 @@ public class JobQueue {
      */
     Job getNext() {
         while (_alive) {
-            synchronized (_jobLock) {
-                if (_readyJobs.size() > 0) {
-                    return (Job)_readyJobs.remove(0);
-                } else {
-                    try { _jobLock.wait(); } catch (InterruptedException ie) {}
+            try {
+                synchronized (_jobLock) {
+                    if (_readyJobs.size() > 0) {
+                        return (Job)_readyJobs.remove(0);
+                    } else {
+                        _jobLock.wait();
                     }
                 }
-            }
+            } catch (InterruptedException ie) {}
         }
         if (_log.shouldLog(Log.WARN))
             _log.warn("No longer alive, returning null");
@@ -403,50 +405,50 @@ public class JobQueue {
             long now = _context.clock().now();
             long timeToWait = -1;
             ArrayList toAdd = null;
-            synchronized (_jobLock) {
-                for (int i = 0; i < _timedJobs.size(); i++) {
-                    Job j = (Job)_timedJobs.get(i);
-                    // find jobs due to start before now
-                    long timeLeft = j.getTiming().getStartAfter() - now;
-                    if (timeLeft <= 0) {
-                        if (j instanceof JobImpl)
-                            ((JobImpl)j).madeReady();
+            try {
+                synchronized (_jobLock) {
+                    for (int i = 0; i < _timedJobs.size(); i++) {
+                        Job j = (Job)_timedJobs.get(i);
+                        // find jobs due to start before now
+                        long timeLeft = j.getTiming().getStartAfter() - now;
+                        if (timeLeft <= 0) {
+                            if (j instanceof JobImpl)
+                                ((JobImpl)j).madeReady();

-                        if (toAdd == null) toAdd = new ArrayList(4);
-                        toAdd.add(j);
-                        _timedJobs.remove(i);
-                        i--; // so the index stays consistent
-                    } else {
-                        if ( (timeToWait <= 0) || (timeLeft < timeToWait) )
-                            timeToWait = timeLeft;
+                            if (toAdd == null) toAdd = new ArrayList(4);
+                            toAdd.add(j);
+                            _timedJobs.remove(i);
+                            i--; // so the index stays consistent
+                        } else {
+                            if ( (timeToWait <= 0) || (timeLeft < timeToWait) )
+                                timeToWait = timeLeft;
                         }
                     }
                 }

-                if (toAdd != null) {
-                    if (_log.shouldLog(Log.DEBUG))
-                        _log.debug("Not waiting - we have " + toAdd.size() + " newly ready jobs");
-                    // rather than addAll, which allocs a byte array rv before adding,
-                    // we iterate, since toAdd is usually going to only be 1 or 2 entries
-                    // and since readyJobs will often have the space, we can avoid the
-                    // extra alloc. (no, i'm not just being insane - i'm updating this based
-                    // on some profiling data ;)
-                    for (int i = 0; i < toAdd.size(); i++)
-                        _readyJobs.add(toAdd.get(i));
-                    _jobLock.notifyAll();
-                } else {
-                    if (timeToWait < 0)
-                        timeToWait = 30*1000;
-                    else if (timeToWait < 10)
-                        timeToWait = 10;
-                    else if (timeToWait > 10*1000)
-                        timeToWait = 10*1000;
-                    //if (_log.shouldLog(Log.DEBUG))
-                    //    _log.debug("Waiting " + timeToWait + " before rechecking the timed queue");
-                    try {
+                    if (toAdd != null) {
+                        if (_log.shouldLog(Log.DEBUG))
+                            _log.debug("Not waiting - we have " + toAdd.size() + " newly ready jobs");
+                        // rather than addAll, which allocs a byte array rv before adding,
+                        // we iterate, since toAdd is usually going to only be 1 or 2 entries
+                        // and since readyJobs will often have the space, we can avoid the
+                        // extra alloc. (no, i'm not just being insane - i'm updating this based
+                        // on some profiling data ;)
+                        for (int i = 0; i < toAdd.size(); i++)
+                            _readyJobs.add(toAdd.get(i));
+                        _jobLock.notifyAll();
+                    } else {
+                        if (timeToWait < 0)
+                            timeToWait = 30*1000;
+                        else if (timeToWait < 10)
+                            timeToWait = 10;
+                        else if (timeToWait > 10*1000)
+                            timeToWait = 10*1000;
+                        //if (_log.shouldLog(Log.DEBUG))
+                        //    _log.debug("Waiting " + timeToWait + " before rechecking the timed queue");
                         _jobLock.wait(timeToWait);
-                    } catch (InterruptedException ie) {}
-                }
-            } // synchronize (_jobLock)
+                    }
+                } // synchronize (_jobLock)
+            } catch (InterruptedException ie) {}
         } // while (_alive)
     } catch (Throwable t) {
         _context.clock().removeUpdateListener(this);
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.270 $ $Date: 2005/10/14 08:48:05 $";
+    public final static String ID = "$Revision: 1.271 $ $Date: 2005/10/17 19:39:46 $";
     public final static String VERSION = "0.6.1.3";
-    public final static long BUILD = 1;
+    public final static long BUILD = 2;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);