* Throttle:
  - Correctly check inbound and outbound total bw limits separately
  - Fix up and actually use the tunnel.participatingMessageCount stat,
    favor it if lower than the total bw stat, so that
    client traffic isn't included for throttle decisions
  - Reduce min message count from 60 to 40
* Tunnel Dispatcher:
  - Add tunnel.participatingBandwidth stat
  - Remove all 3h and 24h stats
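As a rough illustration of the new throttle logic described above, here is a simplified, standalone Java sketch. The names and numbers are hypothetical and this is not the actual RouterThrottleImpl code (that is in the diff below); it only shows the shape of the decision: check the inbound and outbound limits separately, then favor the participating-tunnel estimate when it is lower than the measured total so local client traffic is not counted against the share.

    // Simplified sketch only; see the RouterThrottleImpl diff below for the real code.
    public class ThrottleSketch {
        static final int MIN_AVAILABLE_BPS = 4 * 1024; // leave at least 4KBps free

        // maxInKBps/maxOutKBps: configured limits (KBps); usedInBps/usedOutBps: measured
        // short-term usage per direction (Bps); participatingBps: estimate derived from
        // participating-tunnel message counts (Bps); share: share percentage (0.0-1.0).
        static boolean allowTunnel(int maxInKBps, int maxOutKBps,
                                   int usedInBps, int usedOutBps,
                                   int participatingBps, double share) {
            // 1) Check the inbound and outbound totals separately; both need headroom.
            int availBps = maxInKBps * 1024 - usedInBps;
            availBps = Math.min(availBps, maxOutKBps * 1024 - usedOutBps);
            if (availBps < MIN_AVAILABLE_BPS)
                return false;

            // 2) For the share check, favor the participating estimate when it is lower
            //    than the measured total, so local client traffic is not counted.
            int maxKBps = Math.min(maxInKBps, maxOutKBps);
            int used = Math.min(Math.max(usedInBps, usedOutBps), participatingBps);
            availBps = Math.min(availBps, (int) ((maxKBps * 1024) * share) - used);
            // (The real code then rejects probabilistically based on how full the share is.)
            return availBps >= MIN_AVAILABLE_BPS;
        }

        public static void main(String[] args) {
            // Hypothetical numbers: 100/50 KBps limits, 30/20 KBps measured in/out,
            // ~10 KBps attributable to participating tunnels, 80% share.
            System.out.println(allowTunnel(100, 50, 30 * 1024, 20 * 1024, 10 * 1024, 0.8));
        }
    }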
history.txt
@@ -1,3 +1,14 @@
+2008-09-18 zzz
+    * Throttle:
+      - Correctly check inbound and outbound total bw limits separately
+      - Fix up and actually use the tunnel.participatingMessageCount stat,
+        favor it if lower than the total bw stat, so that
+        client traffic isn't included for throttle decisions
+      - Reduce min message count from 60 to 40
+    * Tunnel Dispatcher:
+      - Add tunnel.participatingBandwidth stat
+      - Remove all 3h and 24h stats
+
 2008-09-15 zzz
     * FloodOnlySearchJob:
       - Ask non-floodfill peers if we don't know any floodfills
Router.java
@@ -1112,6 +1112,15 @@ public class Router {
         }
         return 0;
     }
+    public int get1sRateIn() {
+        RouterContext ctx = _context;
+        if (ctx != null) {
+            FIFOBandwidthLimiter bw = ctx.bandwidthLimiter();
+            if (bw != null)
+                return (int) bw.getReceiveBps();
+        }
+        return 0;
+    }

     public int get15sRate() { return get15sRate(false); }
     public int get15sRate(boolean outboundOnly) {
@@ -1127,6 +1136,15 @@ public class Router {
         }
         return 0;
     }
+    public int get15sRateIn() {
+        RouterContext ctx = _context;
+        if (ctx != null) {
+            FIFOBandwidthLimiter bw = ctx.bandwidthLimiter();
+            if (bw != null)
+                return (int) bw.getReceiveBps15s();
+        }
+        return 0;
+    }

     public int get1mRate() { return get1mRate(false); }
     public int get1mRate(boolean outboundOnly) {
@@ -1148,6 +1166,20 @@ public class Router {
             recv = (int)rs.getRate(1*60*1000).getAverageValue();
         return Math.max(send, recv);
     }
+    public int get1mRateIn() {
+        RouterContext ctx = _context;
+        if (ctx == null)
+            return 0;
+        StatManager mgr = ctx.statManager();
+        if (mgr == null)
+            return 0;
+        RateStat rs = mgr.getRate("bw.recvRate");
+        int recv = 0;
+        if (rs != null)
+            recv = (int)rs.getRate(1*60*1000).getAverageValue();
+        return recv;
+    }
+
     public int get5mRate() { return get5mRate(false); }
     public int get5mRate(boolean outboundOnly) {
         int send = 0;
RouterThrottleImpl.java
@@ -182,9 +182,10 @@ class RouterThrottleImpl implements RouterThrottle {
                     return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
                 }
             } else {
-                if (_log.shouldLog(Log.INFO))
-                    _log.info("Accepting tunnel request, since 60m test time average is " + avg10m
-                              + " and past 1m only has " + avg1m + ")");
+                // not yet...
+                //if (_log.shouldLog(Log.INFO))
+                //    _log.info("Accepting tunnel request, since 60m test time average is " + avg10m
+                //              + " and past 1m only has " + avg1m + ")");
             }
         }
@@ -201,7 +202,6 @@ class RouterThrottleImpl implements RouterThrottle {
                 return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
             }
         } catch (NumberFormatException nfe) {
             // no default, ignore it
         }
     }
@@ -260,14 +260,15 @@ class RouterThrottleImpl implements RouterThrottle {
         // ok, all is well, let 'er in
         _context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, 60*10*1000);

-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Accepting a new tunnel request (now allocating " + bytesAllocated + " bytes across " + numTunnels
-                       + " tunnels with lag of " + lag + ")");
+        //if (_log.shouldLog(Log.DEBUG))
+        //    _log.debug("Accepting a new tunnel request (now allocating " + bytesAllocated + " bytes across " + numTunnels
+        //               + " tunnels with lag of " + lag + ")");
         return TUNNEL_ACCEPT;
     }

-    private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 60; // .1KBps
+    private static final int DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE = 40; // .067KBps
     private static final int MIN_AVAILABLE_BPS = 4*1024; // always leave at least 4KBps free when allowing
+    private static final String LIMIT_STR = "Rejecting tunnels: Bandwidth limit";

     /**
      * with bytesAllocated already accounted for across the numTunnels existing
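A back-of-the-envelope check of the DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE change above, assuming roughly 1KB per tunnel message over a 10-minute tunnel lifetime (an assumption, but consistent with the ".1KBps" and ".067KBps" comments); illustrative only:

    public class TunnelEstimate {
        public static void main(String[] args) {
            int lifetimeSecs = 10 * 60;                       // 10-minute tunnel lifetime
            for (int messages : new int[] { 60, 40 }) {
                double kbps = messages * 1.0 / lifetimeSecs;  // ~1KB per message (assumed)
                System.out.printf("%d messages -> %.3f KBps%n", messages, kbps);
            }
            // prints: 60 messages -> 0.100 KBps, 40 messages -> 0.067 KBps
        }
    }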
@@ -276,46 +277,61 @@ class RouterThrottleImpl implements RouterThrottle {
      *
      */
     private boolean allowTunnel(double bytesAllocated, int numTunnels) {
-        int maxKBps = Math.min(_context.bandwidthLimiter().getOutboundKBytesPerSecond(), _context.bandwidthLimiter().getInboundKBytesPerSecond());
-        int used1s = _context.router().get1sRate(); // dont throttle on the 1s rate, its too volatile
-        int used15s = _context.router().get15sRate();
-        int used1m = _context.router().get1mRate(); // dont throttle on the 1m rate, its too slow
-        int used = Math.min(used15s,used1s);
+        int maxKBpsIn = _context.bandwidthLimiter().getInboundKBytesPerSecond();
+        int maxKBpsOut = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
+        int maxKBps = Math.min(maxKBpsIn, maxKBpsOut);
+        int usedIn = Math.min(_context.router().get1sRateIn(), _context.router().get15sRateIn());
+        int usedOut = Math.min(_context.router().get1sRate(true), _context.router().get15sRate(true));
+        int used = Math.max(usedIn, usedOut);
+        int used1mIn = _context.router().get1mRateIn();
+        int used1mOut = _context.router().get1mRate(true);

+        // Check the inbound and outbound total bw available (separately)
+        int availBps = (maxKBpsIn*1024) - usedIn;
+        availBps = Math.min(availBps, (maxKBpsOut*1024) - usedOut);
+        if (availBps < MIN_AVAILABLE_BPS) {
+            if (_log.shouldLog(Log.WARN)) _log.warn("Reject, avail (" + availBps + ") less than min");
+            setTunnelStatus(LIMIT_STR);
+            return false;
+        }

+        // Now compute the share bw available, using
+        // the bytes-allocated estimate for the participating tunnels
+        // (if lower than the total bw, which it should be),
+        // since some of the total used bandwidth may be for local clients
         double share = _context.router().getSharePercentage();
-        int availBps = (int)(((maxKBps*1024)*share) - used); //(int)(((maxKBps*1024) - used) * getSharePercentage());
+        used = Math.min(used, (int) (bytesAllocated / (10*60)));
+        availBps = Math.min(availBps, (int)(((maxKBps*1024)*share) - used));

+        // Write stats before making decisions
         _context.statManager().addRateData("router.throttleTunnelBytesUsed", used, maxKBps);
         _context.statManager().addRateData("router.throttleTunnelBytesAllowed", availBps, (long)bytesAllocated);

-        long overage = used1m - (maxKBps*1024);
+        // Now see if 1m rates are too high
+        long overage = used1mIn - (maxKBpsIn*1024);
+        overage = Math.max(overage, used1mOut - (maxKBpsOut*1024));
         if ( (overage > 0) &&
             ((overage/(float)(maxKBps*1024f)) > _context.random().nextFloat()) ) {
-            if (_log.shouldLog(Log.WARN)) _log.warn("Reject tunnel, 1m rate (" + used1m + ") indicates overload.");
+            if (_log.shouldLog(Log.WARN)) _log.warn("Reject tunnel, 1m rate (" + overage + " over) indicates overload.");
+            setTunnelStatus(LIMIT_STR);
             return false;
         }

 //        if (true) {
             // ok, ignore any predictions of 'bytesAllocated', since that makes poorly
             // grounded conclusions about future use (or even the bursty use). Instead,
             // simply say "do we have the bw to handle a new request"?
             float maxBps = maxKBps * 1024f;
             float pctFull = (maxBps - availBps) / (maxBps);
             double probReject = Math.pow(pctFull, 16); // steep curve
             double rand = _context.random().nextFloat();
-            boolean reject = (availBps < MIN_AVAILABLE_BPS) || (rand <= probReject);
+            boolean reject = rand <= probReject;
             if (reject && _log.shouldLog(Log.WARN))
-                _log.warn("reject = " + reject + " avail/maxK/used " + availBps + "/" + maxKBps + "/"
+                _log.warn("Reject avail/maxK/used " + availBps + "/" + maxKBps + "/"
                           + used + " pReject = " + probReject + " pFull = " + pctFull + " numTunnels = " + numTunnels
-                          + "rand = " + rand + " est = " + bytesAllocated + " share = " + (float)share);
+                          + " rand = " + rand + " est = " + bytesAllocated);
             else if (_log.shouldLog(Log.DEBUG))
-                _log.debug("reject = " + reject + " avail/maxK/used " + availBps + "/" + maxKBps + "/"
+                _log.debug("Accept avail/maxK/used " + availBps + "/" + maxKBps + "/"
                           + used + " pReject = " + probReject + " pFull = " + pctFull + " numTunnels = " + numTunnels
-                          + "rand = " + rand + " est = " + bytesAllocated + " share = " + (float)share);
+                          + " rand = " + rand + " est = " + bytesAllocated);
             if (probReject >= 0.9)
-                setTunnelStatus("Rejecting tunnels: Bandwidth limit");
+                setTunnelStatus(LIMIT_STR);
             else if (probReject >= 0.5)
                 setTunnelStatus("Rejecting " + ((int)(100.0*probReject)) + "% of tunnels: Bandwidth limit");
             else if(probReject >= 0.1)
@@ -323,7 +339,6 @@ class RouterThrottleImpl implements RouterThrottle {
             else
                 setTunnelStatus("Accepting tunnels");
             return !reject;
 //        }


     /*
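To get a feel for the Math.pow(pctFull, 16) "steep curve" in allowTunnel() above, a small standalone calculation of the rejection probability at a few utilization levels (illustrative only):

    public class RejectCurve {
        public static void main(String[] args) {
            // probReject stays negligible until the share bandwidth is ~70-80% full,
            // then climbs quickly toward 1.0.
            for (double pctFull : new double[] { 0.50, 0.70, 0.80, 0.90, 0.95, 0.99 }) {
                double probReject = Math.pow(pctFull, 16);    // the "steep curve" above
                System.out.printf("pctFull=%.2f -> probReject=%.4f%n", pctFull, probReject);
            }
            // approx: 0.80 -> 0.028, 0.90 -> 0.185, 0.95 -> 0.440, 0.99 -> 0.851
        }
    }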
RouterVersion.java
@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
 public class RouterVersion {
     public final static String ID = "$Revision: 1.548 $ $Date: 2008-06-07 23:00:00 $";
     public final static String VERSION = "0.6.3";
-    public final static long BUILD = 5;
+    public final static long BUILD = 6;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);
TunnelDispatcher.java
@@ -58,58 +58,61 @@ public class TunnelDispatcher implements Service {
         _leaveJob = new LeaveTunnel(ctx);
         ctx.statManager().createRateStat("tunnel.participatingTunnels",
                                          "How many tunnels are we participating in?", "Tunnels",
-                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 60*1000, 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchOutboundPeer",
                                          "How many messages we send out a tunnel targetting a peer?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchOutboundTunnel",
                                          "How many messages we send out a tunnel targetting a tunnel?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchInbound",
                                          "How many messages we send through our tunnel gateway?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchParticipant",
                                          "How many messages we send through a tunnel we are participating in?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchEndpoint",
                                          "How many messages we receive as the outbound endpoint of a tunnel?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.joinOutboundGateway",
                                          "How many tunnels we join as the outbound gateway?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.joinOutboundGatewayZeroHop",
                                          "How many zero hop tunnels we join as the outbound gateway?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.joinInboundEndpoint",
                                          "How many tunnels we join as the inbound endpoint?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.joinInboundEndpointZeroHop",
                                          "How many zero hop tunnels we join as the inbound endpoint?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.joinParticipant",
                                          "How many tunnels we join as a participant?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.joinOutboundEndpoint",
                                          "How many tunnels we join as the outbound endpoint?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.joinInboundGateway",
                                          "How many tunnels we join as the inbound gateway?", "Tunnels",
-                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 10*60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchGatewayTime",
                                          "How long it takes to dispatch a TunnelGatewayMessage", "Tunnels",
-                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchDataTime",
                                          "How long it takes to dispatch a TunnelDataMessage", "Tunnels",
-                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchOutboundTime",
                                          "How long it takes to dispatch an outbound message", "Tunnels",
-                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 60*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.dispatchOutboundZeroHopTime",
                                          "How long it takes to dispatch an outbound message through a zero hop tunnel", "Tunnels",
-                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 60*1000l, 60*60*1000l });
+        ctx.statManager().createRateStat("tunnel.participatingBandwidth",
+                                         "Participating traffic", "Tunnels",
+                                         new long[] { 60*1000l, 60*10*1000l });
         ctx.statManager().createRateStat("tunnel.participatingMessageCount",
                                          "How many messages are sent through a participating tunnel?", "Tunnels",
-                                         new long[] { 60*1000l, 60*10*1000l, 60*60*1000l, 24*60*60*1000l });
+                                         new long[] { 60*1000l, 60*10*1000l, 60*60*1000l });
         ctx.statManager().createRateStat("tunnel.ownedMessageCount",
                                          "How many messages are sent through a tunnel we created (period == failures)?", "Tunnels",
                                          new long[] { 60*1000l, 10*60*1000l, 60*60*1000l });
@@ -535,31 +538,35 @@ public class TunnelDispatcher implements Service {

     /**
      * Generate a current estimate of usage per-participating-tunnel lifetime.
-     * The stats code calls this every 20s.
+     * The router code calls this every 20s.
      * This is better than waiting until the tunnel expires to update the rate,
      * as we want this to be current because it's an important part of
      * the throttle code.
+     * Stay a little conservative by taking the counts only for tunnels 1-10m old
+     * and computing the average from that.
      */
     public void updateParticipatingStats() {
         List participating = listParticipatingTunnels();
         int size = participating.size();
         long count = 0;
+        long bw = 0;
         long tcount = 0;
         long tooYoung = _context.clock().now() - 60*1000;
         long tooOld = tooYoung - 9*60*1000;
         for (int i = 0; i < size; i++) {
             HopConfig cfg = (HopConfig)participating.get(i);
             long c = cfg.getRecentMessagesCount();
+            bw += c;
             long created = cfg.getCreation();
             if (created > tooYoung || created < tooOld)
                 continue;
             tcount++;
             count += c;
         }
         // This is called every 20s from Router.java, with 11m tunnel lifetime, so *= 33
         if (tcount > 0)
-            count = count * 33 / tcount;
+            count = count * 30 / tcount;
         _context.statManager().addRateData("tunnel.participatingMessageCount", count, 20*1000);
+        _context.statManager().addRateData("tunnel.participatingBandwidth", bw*1024/20, 20*1000);
         _context.statManager().addRateData("tunnel.participatingTunnels", size, 0);
     }
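A quick sanity check of the scaling in updateParticipatingStats() above, assuming roughly 1KB per tunnel message (an assumption suggested by the bw*1024/20 term); the sample counts used here are hypothetical:

    public class ParticipatingStatsCheck {
        public static void main(String[] args) {
            // Hypothetical 20-second sample:
            long bwMessages = 500;      // messages seen across all participating tunnels
            long perTunnel20s = 4;      // average messages per 1-10 minute-old tunnel

            // tunnel.participatingBandwidth: messages * ~1KB / 20s -> bytes per second
            long bps = bwMessages * 1024 / 20;            // 25600 Bps, i.e. 25 KBps
            // tunnel.participatingMessageCount: scale the 20s average to a tunnel lifetime;
            // *30 is slightly conservative versus the ~33 a full 11-minute lifetime implies
            long perTunnelLifetime = perTunnel20s * 30;   // 120 messages

            System.out.println("participating bandwidth ~ " + bps + " Bps");
            System.out.println("messages per tunnel lifetime ~ " + perTunnelLifetime);
        }
    }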