* Summary bar:
- Add messages when dropping tunnel requests due to load
@@ -47,4 +47,5 @@ public interface RouterThrottle {
      * Message on the state of participating tunnel acceptance
      */
     public String getTunnelStatus();
+    public void setTunnelStatus(String msg);
 }
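The setter added above makes RouterThrottle a two-way channel: the throttle and the tunnel build path can publish a human-readable reason for refusing or dropping requests, and the console summary bar can read it back through getTunnelStatus(). As a rough sketch only, a consumer might look like the class below; the helper name, the null/empty check, and the "Accepting tunnels" fallback are assumptions for illustration, while getTunnelStatus() and _context.throttle() come from the patch itself.

import net.i2p.router.RouterContext;

// Hypothetical consumer of the published status; not part of this commit.
public class TunnelStatusRenderer {
    private final RouterContext _context;

    public TunnelStatusRenderer(RouterContext context) {
        _context = context;
    }

    // Returns whatever reason the throttle last published,
    // e.g. "Rejecting tunnels: Request overload".
    public String render() {
        String status = _context.throttle().getTunnelStatus();
        // Fallback wording is an assumption for this sketch.
        return (status != null && status.length() > 0) ? status : "Accepting tunnels";
    }
}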
@@ -94,6 +94,7 @@ class RouterThrottleImpl implements RouterThrottle {
             return TunnelHistory.TUNNEL_REJECT_CRIT;
         
         long lag = _context.jobQueue().getMaxLag();
+        // reject here if lag too high???
         RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");
         Rate r = null;
         if (rs != null)
@@ -104,7 +105,7 @@ class RouterThrottleImpl implements RouterThrottle {
                 _log.debug("Refusing tunnel request with the job lag of " + lag
                            + "since the 1 minute message processing time is too slow (" + processTime + ")");
             _context.statManager().addRateData("router.throttleTunnelProcessingTime1m", (long)processTime, (long)processTime);
-            setTunnelStatus("Rejecting tunnels: High processing time");
+            setTunnelStatus("Rejecting tunnels: High message delay");
             return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
         }
 
@@ -431,7 +432,7 @@ class RouterThrottleImpl implements RouterThrottle {
             setTunnelStatus("Rejecting tunnels");
     }
     
-    private void setTunnelStatus(String msg) {
+    public void setTunnelStatus(String msg) {
         _tunnelStatus = msg;
     }
 
@@ -103,6 +103,9 @@ class BuildHandler {
             }
         } while (_inboundBuildMessages.size() > 0);
         
+        if (dropExpired > 0)
+            _context.throttle().setTunnelStatus("Dropping tunnel requests: Too slow");
+
         // now pull off the oldest requests first (we're doing a tail-drop
         // when adding)
         for (int i = 0; i < toHandle && _inboundBuildMessages.size() > 0; i++)
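The tail-drop comment in the hunk above describes the queue discipline for inbound build requests: new arrivals are appended at the tail (and refused there once the backlog is too deep), while handling always starts from the oldest entry. The standalone sketch below illustrates that pattern with generic names and a java.util.ArrayDeque; it is not the actual BuildHandler code, which keeps its requests in _inboundBuildMessages.

import java.util.ArrayDeque;
import java.util.Deque;

// Generic illustration of tail-drop-on-add, oldest-first-on-handle.
public class TailDropQueueDemo {
    private final Deque<String> queue = new ArrayDeque<String>();
    private final int capacity;

    public TailDropQueueDemo(int capacity) {
        this.capacity = capacity;
    }

    // The newest request is the one refused when the queue is already full (tail drop).
    public boolean offer(String request) {
        if (queue.size() >= capacity)
            return false;
        queue.addLast(request);
        return true;
    }

    // Requests are serviced oldest-first; returns null when the queue is empty.
    public String handleNext() {
        return queue.pollFirst();
    }
}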
@@ -310,6 +313,7 @@ class BuildHandler {
         
         if (timeSinceReceived > (BuildRequestor.REQUEST_TIMEOUT*3)) {
             // don't even bother, since we are so overloaded locally
+            _context.throttle().setTunnelStatus("Dropping tunnel requests: Overloaded");
             if (_log.shouldLog(Log.WARN))
                 _log.warn("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
                           + ", since we received it a long time ago: " + timeSinceReceived);
@@ -464,6 +468,7 @@ class BuildHandler {
             pDrop = (float)Math.pow(pDrop, 16);
             if (_context.random().nextFloat() < pDrop) { // || (proactiveDrops > MAX_PROACTIVE_DROPS) ) ) {
                 _context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay, proactiveDrops);
+                _context.throttle().setTunnelStatus("Rejecting tunnels: Request overload");
                 if (true || (proactiveDrops < MAX_PROACTIVE_DROPS*2))
                     response = TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
                 else
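Both this hunk and the later proactive-drop hunk shape the drop probability with (float)Math.pow(pDrop, 16), the curve the in-line "steeeep" comment refers to: the raw ratio is pushed toward zero for light load and only climbs sharply as it approaches 1, so random drops stay rare until the router is genuinely overloaded. A tiny standalone demo (not from the patch) shows the shape of that curve:

// Prints how raising the raw load ratio to the 16th power flattens the
// low end and steepens the high end of the drop-probability curve.
public class DropCurveDemo {
    public static void main(String[] args) {
        for (int step = 1; step <= 10; step++) {
            double raw = step / 10.0;
            double steep = Math.pow(raw, 16);
            System.out.printf("raw=%.1f -> pDrop=%.6f%n", raw, steep);
        }
    }
}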
@@ -652,6 +657,7 @@ class BuildHandler {
                 }
             }
             if (dropped > 0) {
+                _context.throttle().setTunnelStatus("Dropping tunnel requests: High load");
                 // if the queue is backlogged, stop adding new messages
                 _context.statManager().addRateData("tunnel.dropLoadBacklog", _inboundBuildMessages.size(), _inboundBuildMessages.size());
             } else {
@@ -660,6 +666,7 @@ class BuildHandler {
                 pDrop = (float)Math.pow(pDrop, 16); // steeeep
                 float f = _context.random().nextFloat();
                 if ( (pDrop > f) && (allowProactiveDrop()) ) {
+                    _context.throttle().setTunnelStatus("Dropping tunnel requests: Queue time");
                     _context.statManager().addRateData("tunnel.dropLoadProactive", queueTime, _inboundBuildMessages.size());
                 } else {
                     _inboundBuildMessages.add(new BuildMessageState(receivedMessage, from, fromHash));