* Stats: Increase coalesce time to 50s (was 20s) to reduce CPU use
* Peer Profiles:
  - Increase reorganize time to 45s (was 30s) to reduce CPU use and lock contention
  - Remove some stat rates
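The pattern in this commit is to keep the coalesce interval in a single constant and hand the same value to the tunnel-stat update, so the rate windows stay in sync with the scheduler. A minimal, self-contained sketch of that pattern, using java.util.concurrent instead of the router's SimpleScheduler, with stand-in method names rather than the router's real API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class CoalesceSketch {
    /** coalesce stats this often - a little under one minute so the graphs stay current */
    private static final int COALESCE_TIME = 50 * 1000;

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    public void startup() {
        // One periodic task owns both the tunnel-stat update and the coalesce pass,
        // and the interval is passed down so per-window rates normalize consistently.
        scheduler.scheduleAtFixedRate(() -> {
            updateParticipatingStats(COALESCE_TIME); // stand-in for tunnelDispatcher().updateParticipatingStats(...)
            coalesceStats();                         // stand-in for statManager().coalesceStats()
        }, COALESCE_TIME, COALESCE_TIME, TimeUnit.MILLISECONDS);
    }

    private void updateParticipatingStats(int ms) { /* record per-interval tunnel traffic */ }
    private void coalesceStats() { /* fold pending samples into the published rates */ }
}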
@@ -75,6 +75,9 @@ public class Router {
     /** used to differentiate routerInfo files on different networks */
     public static final int NETWORK_ID = 2;
     
+    /** coalesce stats this often - should be a little less than one minute, so the graphs get updated */
+    private static final int COALESCE_TIME = 50*1000;
+    
     /** this puts an 'H' in your routerInfo **/
     public final static String PROP_HIDDEN = "router.hiddenMode";
     /** this does not put an 'H' in your routerInfo **/
@@ -312,7 +315,7 @@ public class Router {
         _context.inNetMessagePool().startup();
         startupQueue();
         //_context.jobQueue().addJob(new CoalesceStatsJob(_context));
-        SimpleScheduler.getInstance().addPeriodicEvent(new CoalesceStatsEvent(_context), 20*1000);
+        SimpleScheduler.getInstance().addPeriodicEvent(new CoalesceStatsEvent(_context), COALESCE_TIME);
         _context.jobQueue().addJob(new UpdateRoutingKeyModifierJob(_context));
         warmupCrypto();
         //_sessionKeyPersistenceHelper.startup();
@@ -1348,7 +1351,7 @@ private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
         long used = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
         getContext().statManager().addRateData("router.memoryUsed", used, 0);
         
-        getContext().tunnelDispatcher().updateParticipatingStats();
+        getContext().tunnelDispatcher().updateParticipatingStats(COALESCE_TIME);
         
         getContext().statManager().coalesceStats();
         
@@ -61,7 +61,7 @@ class PeerManager {
             _peersByCapability[i] = new ArrayList(64);
         loadProfiles();
         ////_context.jobQueue().addJob(new EvaluateProfilesJob(_context));
-        SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, 30*1000);
+        SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, 45*1000);
         //_context.jobQueue().addJob(new PersistProfilesJob(_context, this));
     }
     
@@ -108,11 +108,11 @@ public class ProfileOrganizer {
         _thresholdIntegrationValue = 0.0d;
         _persistenceHelper = new ProfilePersistenceHelper(_context);
         
-        _context.statManager().createRateStat("peer.profileSortTime", "How long the reorg takes sorting peers", "Peers", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-        _context.statManager().createRateStat("peer.profileCoalesceTime", "How long the reorg takes coalescing peer stats", "Peers", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-        _context.statManager().createRateStat("peer.profileThresholdTime", "How long the reorg takes determining the tier thresholds", "Peers", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-        _context.statManager().createRateStat("peer.profilePlaceTime", "How long the reorg takes placing peers in the tiers", "Peers", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-        _context.statManager().createRateStat("peer.profileReorgTime", "How long the reorg takes overall", "Peers", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+        _context.statManager().createRateStat("peer.profileSortTime", "How long the reorg takes sorting peers", "Peers", new long[] { 10*60*1000 });
+        _context.statManager().createRateStat("peer.profileCoalesceTime", "How long the reorg takes coalescing peer stats", "Peers", new long[] { 10*60*1000 });
+        _context.statManager().createRateStat("peer.profileThresholdTime", "How long the reorg takes determining the tier thresholds", "Peers", new long[] { 10*60*1000 });
+        _context.statManager().createRateStat("peer.profilePlaceTime", "How long the reorg takes placing peers in the tiers", "Peers", new long[] { 10*60*1000 });
+        _context.statManager().createRateStat("peer.profileReorgTime", "How long the reorg takes overall", "Peers", new long[] { 10*60*1000 });
     }
     
     private void getReadLock() {
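Dropping the 60-second and 60-minute periods leaves a single 10-minute rate per reorg-timing stat. Assuming each entry in the periods array backs its own rolling accumulator, that is one-third of the bookkeeping on every sample; a rough sketch of that assumption (not the router's StatManager):

import java.util.HashMap;
import java.util.Map;

class RateStatSketch {
    private final Map<Long, long[]> accumulators = new HashMap<>(); // period -> {total, eventCount}

    RateStatSketch(long[] periods) {
        // one accumulator per configured averaging period
        for (long p : periods)
            accumulators.put(p, new long[2]);
    }

    void addData(long value) {
        // work on every sample scales with the number of periods kept
        for (long[] acc : accumulators.values()) {
            acc[0] += value;
            acc[1]++;
        }
    }
}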
@@ -498,14 +498,14 @@ public class TunnelDispatcher implements Service {
     
     /**
      * Generate a current estimate of usage per-participating-tunnel lifetime.
-     * The router code calls this every 20s.
+     * The router code calls this every 'ms' millisecs.
      * This is better than waiting until the tunnel expires to update the rate,
      * as we want this to be current because it's an important part of
      * the throttle code.
      * Stay a little conservative by taking the counts only for tunnels 1-10m old
      * and computing the average from that.
      */
-    public void updateParticipatingStats() {
+    public void updateParticipatingStats(int ms) {
         List<HopConfig> participating = listParticipatingTunnels();
         int size = participating.size();
         long count = 0;
@@ -527,9 +527,9 @@ public class TunnelDispatcher implements Service {
         }
         if (tcount > 0)
             count = count * 30 / tcount;
-        _context.statManager().addRateData("tunnel.participatingMessageCount", count, 20*1000);
-        _context.statManager().addRateData("tunnel.participatingBandwidth", bw*1024/20, 20*1000);
-        _context.statManager().addRateData("tunnel.participatingBandwidthOut", bwOut*1024/20, 20*1000);
+        _context.statManager().addRateData("tunnel.participatingMessageCount", count, ms);
+        _context.statManager().addRateData("tunnel.participatingBandwidth", bw*1024/(ms/1000), ms);
+        _context.statManager().addRateData("tunnel.participatingBandwidthOut", bwOut*1024/(ms/1000), ms);
         _context.statManager().addRateData("tunnel.participatingTunnels", size, 0);
     }
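With the interval passed in, the bandwidth samples divide by the actual window length instead of a hard-coded 20 seconds, so the same underlying traffic rate yields the same normalized value at either interval. A tiny worked example, assuming (as the *1024 scaling suggests) that bw counts KBytes observed during the window:

public class RateNormalizationSketch {
    public static void main(String[] args) {
        int oldMs = 20 * 1000, newMs = 50 * 1000;
        long bwOld = 100;   // KBytes seen in a 20s window
        long bwNew = 250;   // KBytes seen in a 50s window at the same underlying rate
        System.out.println(bwOld * 1024 / (oldMs / 1000));  // 5120 bytes/sec
        System.out.println(bwNew * 1024 / (newMs / 1000));  // 5120 bytes/sec
    }
}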