* Data Structures:
  - New SDSCache for SimpleDataStructures
  - New SimpleByteCache for byte[]
  - Cache Hash, PublicKey, and SigningPublicKey
  - Remove global lock in ByteCache
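
The recurring call-site change in the hunks below swaps allocate-then-fill construction for the new cached factory methods. A minimal sketch of the pattern, using Hash as the example (the same idea applies to PublicKey and SigningPublicKey):

    // Before: always allocates a new Hash plus a fresh byte[32]
    Hash h = new Hash();
    h.readBytes(in);

    // After: returns a cached Hash when the same 32 bytes were seen recently,
    // otherwise builds one and caches it
    Hash h = Hash.create(in);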
@@ -327,11 +327,12 @@ public class ElGamalAESEngine {
    //_log.debug("len: " + len);
    if ((len < 0) || (len > decrypted.length - cur - Hash.HASH_LENGTH - 1))
        throw new Exception("Invalid size of payload (" + len + ", remaining " + (decrypted.length-cur) +")");
    byte hashval[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(decrypted, cur, hashval, 0, Hash.HASH_LENGTH);
    //byte hashval[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(decrypted, cur, hashval, 0, Hash.HASH_LENGTH);
    //readHash = new Hash();
    //readHash.setData(hashval);
    readHash = Hash.create(decrypted, cur);
    cur += Hash.HASH_LENGTH;
    readHash = new Hash();
    readHash.setData(hashval);
    byte flag = decrypted[cur++];
    if (flag == 0x01) {
        byte rekeyVal[] = new byte[SessionKey.KEYSIZE_BYTES];
@@ -202,9 +202,10 @@ public class ElGamalEngine {
    }

    //ByteArrayInputStream bais = new ByteArrayInputStream(val, i, val.length - i);
    byte hashData[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(val, i + 1, hashData, 0, Hash.HASH_LENGTH);
    Hash hash = new Hash(hashData);
    //byte hashData[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(val, i + 1, hashData, 0, Hash.HASH_LENGTH);
    //Hash hash = new Hash(hashData);
    Hash hash = Hash.create(val, i + 1);
    byte rv[] = new byte[payloadLen];
    System.arraycopy(val, i + 1 + Hash.HASH_LENGTH, rv, 0, rv.length);
@@ -35,7 +35,8 @@ public final class SHA256Generator {
    digest.update(source, start, len);
    byte rv[] = digest.digest();
    releaseGnu(digest);
    return new Hash(rv);
    //return new Hash(rv);
    return Hash.create(rv);
    }

    public final void calculateHash(byte[] source, int start, int len, byte out[], int outOffset) {
@@ -25,7 +25,34 @@ public class Hash extends SimpleDataStructure {

    public final static int HASH_LENGTH = 32;
    public final static Hash FAKE_HASH = new Hash(new byte[HASH_LENGTH]);
    private static final int CACHE_SIZE = 2048;

    private static final SDSCache<Hash> _cache = new SDSCache(Hash.class, HASH_LENGTH, CACHE_SIZE);

    /**
     * Pull from cache or return new
     * @since 0.8.3
     */
    public static Hash create(byte[] data) {
        return _cache.get(data);
    }

    /**
     * Pull from cache or return new
     * @since 0.8.3
     */
    public static Hash create(byte[] data, int off) {
        return _cache.get(data, off);
    }

    /**
     * Pull from cache or return new
     * @since 0.8.3
     */
    public static Hash create(InputStream in) throws IOException {
        return _cache.get(in);
    }

    public Hash() {
        super();
    }
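
Worth noting for the call-site hunks that follow: as long as an entry has not been evicted from the LRU, Hash.create() hands back the same Hash instance for equal input bytes. An illustrative sketch (not part of the commit):

    byte[] b = new byte[Hash.HASH_LENGTH];       // 32 bytes, all zeros
    Hash h1 = Hash.create(b.clone());
    Hash h2 = Hash.create(b.clone());
    // h1.equals(h2) is always true; with the cache, h1 == h2 will usually be
    // true as well, so a Hash obtained from create() must be treated as a
    // shared, immutable object (never call setData() on it).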
@@ -61,10 +61,12 @@ public class KeysAndCert extends DataStructureImpl {
    }

    public void readBytes(InputStream in) throws DataFormatException, IOException {
        _publicKey = new PublicKey();
        _publicKey.readBytes(in);
        _signingKey = new SigningPublicKey();
        _signingKey.readBytes(in);
        //_publicKey = new PublicKey();
        //_publicKey.readBytes(in);
        _publicKey = PublicKey.create(in);
        //_signingKey = new SigningPublicKey();
        //_signingKey.readBytes(in);
        _signingKey = SigningPublicKey.create(in);
        //_certificate = new Certificate();
        //_certificate.readBytes(in);
        _certificate = Certificate.create(in);
@@ -110,8 +110,9 @@ public class Lease extends DataStructureImpl {
    }

    public void readBytes(InputStream in) throws DataFormatException, IOException {
        _gateway = new Hash();
        _gateway.readBytes(in);
        //_gateway = new Hash();
        //_gateway.readBytes(in);
        _gateway = Hash.create(in);
        _tunnelId = new TunnelId();
        _tunnelId.readBytes(in);
        _end = DataHelper.readDate(in);
@@ -9,6 +9,9 @@ package net.i2p.data;
 *
 */

import java.io.InputStream;
import java.io.IOException;

/**
 * Defines the PublicKey as defined by the I2P data structure spec.
 * A public key is 256byte Integer. The public key represents only the

@@ -18,6 +21,17 @@ package net.i2p.data;
 */
public class PublicKey extends SimpleDataStructure {
    public final static int KEYSIZE_BYTES = 256;
    private static final int CACHE_SIZE = 256;

    private static final SDSCache<PublicKey> _cache = new SDSCache(PublicKey.class, KEYSIZE_BYTES, CACHE_SIZE);

    /**
     * Pull from cache or return new
     * @since 0.8.3
     */
    public static PublicKey create(InputStream in) throws IOException {
        return _cache.get(in);
    }

    public PublicKey() {
        super();
core/java/src/net/i2p/data/SDSCache.java (new file, 175 lines)
@@ -0,0 +1,175 @@
package net.i2p.data;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.LinkedHashMap;
import java.util.Map;

import net.i2p.I2PAppContext;
import net.i2p.util.Log;
import net.i2p.util.SimpleByteCache;

/**
 * A least recently used cache with a max size, for SimpleDataStructures.
 * The index to the cache is the first 4 bytes of the data, so
 * the data must be sufficiently random.
 *
 * This caches the SDS objects, and also uses SimpleByteCache to cache
 * the unused byte arrays themselves
 *
 * Following is sample usage:
 * <pre>

    private static final SDSCache<Foo> _cache = new SDSCache(Foo.class, LENGTH, 1024);

    public static Foo create(byte[] data) {
        return _cache.get(data);
    }

    public static Foo create(byte[] data, int off) {
        return _cache.get(data, off);
    }

    public static Foo create(InputStream in) throws IOException {
        return _cache.get(in);
    }

 * </pre>
 * @since 0.8.3
 * @author zzz
 */
public class SDSCache<V extends SimpleDataStructure> {
    private static final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(SDSCache.class);

    private static final Class[] conArg = new Class[] { byte[].class };
    private static final double MIN_FACTOR = 0.25;
    private static final double MAX_FACTOR = 3.0;
    private static final double FACTOR;
    static {
        long maxMemory = Runtime.getRuntime().maxMemory();
        FACTOR = Math.max(MIN_FACTOR, Math.min(MAX_FACTOR, maxMemory / (128*1024*1024d)));
    }

    /** the LRU cache */
    private final Map<Integer, V> _cache;
    /** the byte array length for the class we are caching */
    private final int _datalen;
    /** the constructor for the class we are caching */
    private final Constructor<V> _rvCon;
    private final String _statName;

    /**
     * @param rvClass the class that we are storing, i.e. an extension of SimpleDataStructure
     * @param len the length of the byte array in the SimpleDataStructure
     * @param max maximum size of the cache assuming 128MB of mem.
     *            The actual max size will be scaled based on available memory.
     */
    public SDSCache(Class<V> rvClass, int len, int max) {
        int size = (int) (max * FACTOR);
        _cache = new LHM(size);
        _datalen = len;
        try {
            _rvCon = rvClass.getConstructor(conArg);
        } catch (NoSuchMethodException e) {
            throw new RuntimeException("SDSCache init error", e);
        }
        _statName = "SDSCache." + rvClass.getSimpleName();
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("New SDSCache for " + rvClass + " data size: " + len +
                       " max: " + size + " max mem: " + (len * size));
        I2PAppContext.getGlobalContext().statManager().createRateStat(_statName, "Hit rate", "Router", new long[] { 10*60*1000 });
    }

    /**
     * @param data non-null, the byte array for the SimpleDataStructure
     * @return the cached value if available, otherwise
     *         makes a new object and returns it
     * @throws IllegalArgumentException if data is not the correct number of bytes
     * @throws NPE
     */
    public V get(byte[] data) {
        if (data == null)
            throw new NullPointerException("Don't pull null data from the cache");
        int found;
        V rv;
        Integer key = hashCodeOf(data);
        synchronized(_cache) {
            rv = _cache.get(key);
            if (rv != null && DataHelper.eq(data, rv.getData())) {
                // found it, we don't need the data passed in any more
                SimpleByteCache.release(data);
                found = 1;
            } else {
                // make a new one
                try {
                    rv = _rvCon.newInstance(new Object[] { data } );
                } catch (InstantiationException e) {
                    throw new RuntimeException("SDSCache error", e);
                } catch (IllegalAccessException e) {
                    throw new RuntimeException("SDSCache error", e);
                } catch (InvocationTargetException e) {
                    throw new RuntimeException("SDSCache error", e);
                }
                _cache.put(key, rv);
                found = 0;
            }
        }
        I2PAppContext.getGlobalContext().statManager().addRateData(_statName, found, 0);
        return rv;
    }

    /*
     * @param b non-null byte array containing the data, data will be copied to not hold the reference
     * @param off offset in the array to start reading from
     * @return the cached value if available, otherwise
     *         makes a new object and returns it
     * @throws AIOOBE if not enough bytes
     * @throws NPE
     */
    public V get(byte[] b, int off) {
        byte[] data = SimpleByteCache.acquire(_datalen);
        System.arraycopy(b, off, data, 0, _datalen);
        return get(data);
    }

    /*
     * @param in a stream from which the bytes will be read
     * @return the cached value if available, otherwise
     *         makes a new object and returns it
     * @throws IOException if not enough bytes
     */
    public V get(InputStream in) throws IOException {
        byte[] data = SimpleByteCache.acquire(_datalen);
        int read = DataHelper.read(in, data);
        if (read != _datalen)
            throw new EOFException("Not enough bytes to read the data");
        return get(data);
    }

    /**
     * We assume the data has enough randomness in it, so use the first 4 bytes for speed.
     */
    private static Integer hashCodeOf(byte[] data) {
        int rv = data[0];
        for (int i = 1; i < 4; i++)
            rv ^= (data[i] << (i*8));
        return Integer.valueOf(rv);
    }

    private static class LHM<K, V> extends LinkedHashMap<K, V> {
        private final int _max;

        public LHM(int max) {
            super(max, 0.75f, true);
            _max = max;
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            return size() > _max;
        }
    }
}
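
A quick worked example of the sizing logic in SDSCache's static initializer, assuming a JVM reporting a 256 MB max heap (illustrative numbers, not from the commit):

    // FACTOR = max(0.25, min(3.0, maxMemory / 128MB)) = min(3.0, 256/128.0) = 2.0
    long maxMemory = 256L * 1024 * 1024;
    double factor = Math.max(0.25, Math.min(3.0, maxMemory / (128 * 1024 * 1024d)));  // 2.0
    int hashCacheSize = (int) (2048 * factor);   // Hash's CACHE_SIZE of 2048 scales to 4096 entries
    // 4096 entries * 32 bytes = 128 KB of cached hash data, plus object overhead

Note also that the cache key is only the first four bytes of the data, so two distinct values sharing a 4-byte prefix evict each other; the DataHelper.eq() check in get(byte[]) is what prevents a colliding entry from being returned for the wrong data.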
@@ -9,6 +9,9 @@ package net.i2p.data;
 *
 */

import java.io.InputStream;
import java.io.IOException;

/**
 * Defines the SigningPublicKey as defined by the I2P data structure spec.
 * A public key is 256byte Integer. The public key represents only the

@@ -19,6 +22,17 @@ package net.i2p.data;
 */
public class SigningPublicKey extends SimpleDataStructure {
    public final static int KEYSIZE_BYTES = 128;
    private static final int CACHE_SIZE = 256;

    private static final SDSCache<SigningPublicKey> _cache = new SDSCache(SigningPublicKey.class, KEYSIZE_BYTES, CACHE_SIZE);

    /**
     * Pull from cache or return new
     * @since 0.8.3
     */
    public static SigningPublicKey create(InputStream in) throws IOException {
        return _cache.get(in);
    }

    public SigningPublicKey() {
        super();
@@ -33,13 +33,15 @@ public class DestLookupMessage extends I2CPMessageImpl {
    }

    protected void doReadMessage(InputStream in, int size) throws I2CPMessageException, IOException {
        Hash h = new Hash();
        //Hash h = new Hash();
        try {
            h.readBytes(in);
        } catch (DataFormatException dfe) {
            //h.readBytes(in);
            _hash = Hash.create(in);
        //} catch (DataFormatException dfe) {
        } catch (IllegalArgumentException dfe) {
            throw new I2CPMessageException("Unable to load the hash", dfe);
        }
        _hash = h;
        //_hash = h;
    }

    protected byte[] doWriteMessage() throws I2CPMessageException, IOException {
@@ -86,8 +86,9 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl {
    int numTunnels = (int) DataHelper.readLong(in, 1);
    _endpoints.clear();
    for (int i = 0; i < numTunnels; i++) {
        Hash router = new Hash();
        router.readBytes(in);
        //Hash router = new Hash();
        //router.readBytes(in);
        Hash router = Hash.create(in);
        TunnelId tunnel = new TunnelId();
        tunnel.readBytes(in);
        _endpoints.add(new TunnelEndpoint(router, tunnel));
@@ -1,9 +1,9 @@
package net.i2p.util;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

import net.i2p.I2PAppContext;

@@ -57,7 +57,8 @@ import net.i2p.data.ByteArray;
 */
public final class ByteCache {

    private static final Map<Integer, ByteCache> _caches = new HashMap(16);
    private static final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(ByteCache.class);
    private static final Map<Integer, ByteCache> _caches = new ConcurrentHashMap(16);

    /**
     * max size in bytes of each cache

@@ -74,8 +75,9 @@ public final class ByteCache {
    /**
     * Get a cache responsible for objects of the given size
     *
     * @param cacheSize how large we want the cache to grow before using on
     *                  demand allocation
     * @param cacheSize how large we want the cache to grow
     *                  (number of objects, NOT memory size)
     *                  before discarding released objects.
     *                  Since 0.7.14, a limit of 1MB / size is enforced
     *                  for the typical 128MB max memory JVM
     * @param size how large should the objects cached be?

@@ -84,12 +86,11 @@ public final class ByteCache {
        if (cacheSize * size > MAX_CACHE)
            cacheSize = MAX_CACHE / size;
        Integer sz = Integer.valueOf(size);
        ByteCache cache = null;
        synchronized (_caches) {
            if (!_caches.containsKey(sz))
                _caches.put(sz, new ByteCache(cacheSize, size));
            cache = _caches.get(sz);
        }
        ByteCache cache = _caches.get(sz);
        if (cache == null) {
            cache = new ByteCache(cacheSize, size);
            _caches.put(sz, cache);
        }
        cache.resize(cacheSize);
        //I2PAppContext.getGlobalContext().logManager().getLog(ByteCache.class).error("ByteCache size: " + size + " max: " + cacheSize, new Exception("from"));
        return cache;

@@ -102,10 +103,9 @@ public final class ByteCache {
    public static void clearAll() {
        for (ByteCache bc : _caches.values())
            bc.clear();
        I2PAppContext.getGlobalContext().logManager().getLog(ByteCache.class).warn("WARNING: Low memory, clearing byte caches");
        _log.warn("WARNING: Low memory, clearing byte caches");
    }

    private Log _log;
    /** list of available and available entries */
    private Queue<ByteArray> _available;
    private int _maxCached;

@@ -116,7 +116,7 @@ public final class ByteCache {
    private static final boolean _cache = true;

    /** how often do we cleanup the cache */
    private static final int CLEANUP_FREQUENCY = 30*1000;
    private static final int CLEANUP_FREQUENCY = 33*1000;
    /** if we haven't exceeded the cache size in 2 minutes, cut our cache in half */
    private static final long EXPIRE_PERIOD = 2*60*1000;

@@ -126,9 +126,8 @@ public final class ByteCache {
        _maxCached = maxCachedEntries;
        _entrySize = entrySize;
        _lastOverflow = -1;
        SimpleScheduler.getInstance().addPeriodicEvent(new Cleanup(), CLEANUP_FREQUENCY);
        _log = I2PAppContext.getGlobalContext().logManager().getLog(ByteCache.class);
        I2PAppContext.getGlobalContext().statManager().createRateStat("byteCache.memory." + entrySize, "Memory usage (B)", "Router", new long[] { 60*1000 });
        SimpleScheduler.getInstance().addPeriodicEvent(new Cleanup(), CLEANUP_FREQUENCY + (entrySize % 7)); //stagger
        I2PAppContext.getGlobalContext().statManager().createRateStat("byteCache.memory." + entrySize, "Memory usage (B)", "Router", new long[] { 10*60*1000 });
    }

    private void resize(int maxCachedEntries) {
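
Design note on the ByteCache change above: getInstance() drops the global synchronized block in favor of an unsynchronized lookup in a ConcurrentHashMap. The check-then-put is no longer atomic, so two threads can race and briefly create two ByteCache objects for the same size; the later put wins and the orphaned instance just forgoes some pooling, which appears to be an accepted trade-off for a lock-free fast path. A hypothetical fully race-free alternative, for comparison only (this is not what the commit does, and it assumes _caches were declared as ConcurrentHashMap rather than Map):

    ByteCache cache = _caches.get(sz);
    if (cache == null) {
        ByteCache newCache = new ByteCache(cacheSize, size);
        // putIfAbsent returns the existing mapping, or null if we won the race
        ByteCache existing = _caches.putIfAbsent(sz, newCache);
        cache = (existing != null) ? existing : newCache;
    }
    cache.resize(cacheSize);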
core/java/src/net/i2p/util/SimpleByteCache.java (new file, 125 lines)
@@ -0,0 +1,125 @@
package net.i2p.util;

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

import net.i2p.I2PAppContext;

/**
 * Like ByteCache but works directly with byte arrays, not ByteArrays.
 * These are designed to be small caches, so there's no cleaner task
 * like there is in ByteCache. And we don't zero out the arrays here.
 * Only the static methods are public here.
 *
 * @since 0.8.3
 */
public final class SimpleByteCache {

    private static final Map<Integer, SimpleByteCache> _caches = new ConcurrentHashMap(8);

    private static final int DEFAULT_SIZE = 16;

    /**
     * Get a cache responsible for arrays of the given size
     *
     * @param size how large should the objects cached be?
     */
    public static SimpleByteCache getInstance(int size) {
        return getInstance(DEFAULT_SIZE, size);
    }

    /**
     * Get a cache responsible for objects of the given size
     *
     * @param cacheSize how large we want the cache to grow
     *                  (number of objects, NOT memory size)
     *                  before discarding released objects.
     * @param size how large should the objects cached be?
     */
    public static SimpleByteCache getInstance(int cacheSize, int size) {
        Integer sz = Integer.valueOf(size);
        SimpleByteCache cache = _caches.get(sz);
        if (cache == null) {
            cache = new SimpleByteCache(cacheSize, size);
            _caches.put(sz, cache);
        }
        cache.resize(cacheSize);
        return cache;
    }

    /**
     * Clear everything (memory pressure)
     */
    public static void clearAll() {
        for (SimpleByteCache bc : _caches.values())
            bc.clear();
    }

    /** list of available and available entries */
    private Queue<byte[]> _available;
    private int _maxCached;
    private int _entrySize;

    private SimpleByteCache(int maxCachedEntries, int entrySize) {
        _available = new LinkedBlockingQueue(maxCachedEntries);
        _maxCached = maxCachedEntries;
        _entrySize = entrySize;
    }

    private void resize(int maxCachedEntries) {
        if (_maxCached >= maxCachedEntries) return;
        _maxCached = maxCachedEntries;
        // make a bigger one, move the cached items over
        Queue<byte[]> newLBQ = new LinkedBlockingQueue(maxCachedEntries);
        byte[] ba;
        while ((ba = _available.poll()) != null)
            newLBQ.offer(ba);
        _available = newLBQ;
    }

    /**
     * Get the next available array, either from the cache or a brand new one
     */
    public static byte[] acquire(int size) {
        return getInstance(size).acquire();
    }

    /**
     * Get the next available array, either from the cache or a brand new one
     */
    private byte[] acquire() {
        byte[] rv = _available.poll();
        if (rv == null)
            rv = new byte[_entrySize];
        return rv;
    }

    /**
     * Put this array back onto the available cache for reuse
     */
    public static void release(byte[] entry) {
        SimpleByteCache cache = _caches.get(entry.length);
        if (cache != null)
            cache.releaseIt(entry);
    }

    /**
     * Put this array back onto the available cache for reuse
     */
    private void releaseIt(byte[] entry) {
        if (entry == null || entry.length != _entrySize)
            return;
        // should be safe without this
        //Arrays.fill(entry, (byte) 0);
        _available.offer(entry);
    }

    /**
     * Clear everything (memory pressure)
     */
    private void clear() {
        _available.clear();
    }
}
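
To see how the two new classes compose: SDSCache.get(byte[], int) and get(InputStream) draw their temporary arrays from SimpleByteCache and hand them back on a cache hit. A minimal stand-alone usage sketch of the byte-array pool (illustrative, not from the commit):

    byte[] buf = SimpleByteCache.acquire(32);    // pooled array if one is available, else new byte[32]
    try {
        // ... fill all 32 bytes before reading them; arrays are not zeroed on release ...
    } finally {
        SimpleByteCache.release(buf);            // hand it back for the next acquire(32)
    }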
@@ -93,9 +93,10 @@ public class BuildRequestRecord {
     * the gateway to which the reply should be sent.
     */
    public Hash readNextIdentity() {
        byte rv[] = new byte[Hash.HASH_LENGTH];
        System.arraycopy(_data.getData(), _data.getOffset() + OFF_SEND_IDENT, rv, 0, Hash.HASH_LENGTH);
        return new Hash(rv);
        //byte rv[] = new byte[Hash.HASH_LENGTH];
        //System.arraycopy(_data.getData(), _data.getOffset() + OFF_SEND_IDENT, rv, 0, Hash.HASH_LENGTH);
        //return new Hash(rv);
        return Hash.create(_data.getData(), _data.getOffset() + OFF_SEND_IDENT);
    }
    /**
     * Tunnel layer encryption key that the current hop should use
@@ -133,15 +133,17 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
    if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
    int curIndex = offset;

    byte keyData[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
    //byte keyData[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
    _key = Hash.create(data, curIndex);
    curIndex += Hash.HASH_LENGTH;
    _key = new Hash(keyData);
    //_key = new Hash(keyData);

    byte fromData[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(data, curIndex, fromData, 0, Hash.HASH_LENGTH);
    //byte fromData[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(data, curIndex, fromData, 0, Hash.HASH_LENGTH);
    _fromHash = Hash.create(data, curIndex);
    curIndex += Hash.HASH_LENGTH;
    _fromHash = new Hash(fromData);
    //_fromHash = new Hash(fromData);

    boolean tunnelSpecified = false;
    switch (data[curIndex]) {

@@ -168,10 +170,11 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
    throw new I2NPMessageException("Invalid number of peers - " + numPeers);
    Set<Hash> peers = new HashSet(numPeers);
    for (int i = 0; i < numPeers; i++) {
        byte peer[] = new byte[Hash.HASH_LENGTH];
        System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
        //byte peer[] = new byte[Hash.HASH_LENGTH];
        //System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
        Hash p = Hash.create(data, curIndex);
        curIndex += Hash.HASH_LENGTH;
        peers.add(new Hash(peer));
        peers.add(p);
    }
    _dontIncludePeers = peers;
    }
@@ -55,26 +55,29 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
    if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
    int curIndex = offset;

    byte keyData[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
    //byte keyData[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
    _key = Hash.create(data, curIndex);
    curIndex += Hash.HASH_LENGTH;
    _key = new Hash(keyData);
    //_key = new Hash(keyData);

    int num = (int)DataHelper.fromLong(data, curIndex, 1);
    curIndex++;

    _peerHashes.clear();
    for (int i = 0; i < num; i++) {
        byte peer[] = new byte[Hash.HASH_LENGTH];
        System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
        //byte peer[] = new byte[Hash.HASH_LENGTH];
        //System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
        Hash p = Hash.create(data, curIndex);
        curIndex += Hash.HASH_LENGTH;
        addReply(new Hash(peer));
        addReply(p);
    }

    byte from[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(data, curIndex, from, 0, Hash.HASH_LENGTH);
    //byte from[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(data, curIndex, from, 0, Hash.HASH_LENGTH);
    _from = Hash.create(data, curIndex);
    curIndex += Hash.HASH_LENGTH;
    _from = new Hash(from);
    //_from = new Hash(from);

    //_context.statManager().addRateData("netDb.searchReplyMessageReceive", num*32 + 64, 1);
    }
@@ -113,10 +113,11 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
    if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
    int curIndex = offset;

    byte keyData[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
    //byte keyData[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
    _key = Hash.create(data, curIndex);
    curIndex += Hash.HASH_LENGTH;
    _key = new Hash(keyData);
    //_key = new Hash(keyData);

    _type = (int)DataHelper.fromLong(data, curIndex, 1);
    curIndex++;

@@ -130,10 +131,11 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
    _replyTunnel = new TunnelId(tunnel);
    curIndex += 4;

    byte gw[] = new byte[Hash.HASH_LENGTH];
    System.arraycopy(data, curIndex, gw, 0, Hash.HASH_LENGTH);
    //byte gw[] = new byte[Hash.HASH_LENGTH];
    //System.arraycopy(data, curIndex, gw, 0, Hash.HASH_LENGTH);
    _replyGateway = Hash.create(data, curIndex);
    curIndex += Hash.HASH_LENGTH;
    _replyGateway = new Hash(gw);
    //_replyGateway = new Hash(gw);
    } else {
        _replyTunnel = null;
        _replyGateway = null;
@@ -90,18 +90,21 @@ public class DeliveryInstructions extends DataStructureImpl {
    case FLAG_MODE_LOCAL:
        break;
    case FLAG_MODE_DESTINATION:
        Hash destHash = new Hash();
        destHash.readBytes(in);
        //Hash destHash = new Hash();
        //destHash.readBytes(in);
        Hash destHash = Hash.create(in);
        setDestination(destHash);
        break;
    case FLAG_MODE_ROUTER:
        Hash routerHash = new Hash();
        routerHash.readBytes(in);
        //Hash routerHash = new Hash();
        //routerHash.readBytes(in);
        Hash routerHash = Hash.create(in);
        setRouter(routerHash);
        break;
    case FLAG_MODE_TUNNEL:
        Hash tunnelRouterHash = new Hash();
        tunnelRouterHash.readBytes(in);
        //Hash tunnelRouterHash = new Hash();
        //tunnelRouterHash.readBytes(in);
        Hash tunnelRouterHash = Hash.create(in);
        setRouter(tunnelRouterHash);
        TunnelId id = new TunnelId();
        id.readBytes(in);

@@ -140,22 +143,25 @@ public class DeliveryInstructions extends DataStructureImpl {
    case FLAG_MODE_LOCAL:
        break;
    case FLAG_MODE_DESTINATION:
        byte destHash[] = new byte[Hash.HASH_LENGTH];
        System.arraycopy(data, cur, destHash, 0, Hash.HASH_LENGTH);
        //byte destHash[] = new byte[Hash.HASH_LENGTH];
        //System.arraycopy(data, cur, destHash, 0, Hash.HASH_LENGTH);
        Hash dh = Hash.create(data, cur);
        cur += Hash.HASH_LENGTH;
        setDestination(new Hash(destHash));
        setDestination(dh);
        break;
    case FLAG_MODE_ROUTER:
        byte routerHash[] = new byte[Hash.HASH_LENGTH];
        System.arraycopy(data, cur, routerHash, 0, Hash.HASH_LENGTH);
        //byte routerHash[] = new byte[Hash.HASH_LENGTH];
        //System.arraycopy(data, cur, routerHash, 0, Hash.HASH_LENGTH);
        Hash rh = Hash.create(data, cur);
        cur += Hash.HASH_LENGTH;
        setRouter(new Hash(routerHash));
        setRouter(rh);
        break;
    case FLAG_MODE_TUNNEL:
        byte tunnelRouterHash[] = new byte[Hash.HASH_LENGTH];
        System.arraycopy(data, cur, tunnelRouterHash, 0, Hash.HASH_LENGTH);
        //byte tunnelRouterHash[] = new byte[Hash.HASH_LENGTH];
        //System.arraycopy(data, cur, tunnelRouterHash, 0, Hash.HASH_LENGTH);
        Hash trh = Hash.create(data, cur);
        cur += Hash.HASH_LENGTH;
        setRouter(new Hash(tunnelRouterHash));
        setRouter(trh);
        setTunnelId(new TunnelId(DataHelper.fromLong(data, cur, 4)));
        cur += 4;
        break;
@@ -13,6 +13,7 @@ import java.util.Set;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

import net.i2p.data.Base64;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;

@@ -290,15 +291,19 @@ class ProfilePersistenceHelper {
        _log.warn("Error loading properties from " + file.getName(), ioe);
        }
    }

    private Hash getHash(String name) {
        String key = name.substring("profile-".length());
        key = key.substring(0, key.length() - ".dat".length());
        Hash h = new Hash();
        //Hash h = new Hash();
        try {
            h.fromBase64(key);
            //h.fromBase64(key);
            byte[] b = Base64.decode(key);
            if (b == null)
                return null;
            Hash h = Hash.create(b);
            return h;
        } catch (DataFormatException dfe) {
        } catch (Exception dfe) {
            _log.warn("Invalid base64 [" + key + "]", dfe);
            return null;
        }
@@ -327,11 +327,12 @@ public class FragmentHandler {
    offset += 4;
    }
    if ( (type == TYPE_ROUTER) || (type == TYPE_TUNNEL) ) {
        byte h[] = new byte[Hash.HASH_LENGTH];
        if (offset + Hash.HASH_LENGTH >= preprocessed.length)
            return -1;
        System.arraycopy(preprocessed, offset, h, 0, Hash.HASH_LENGTH);
        router = new Hash(h);
        //byte h[] = new byte[Hash.HASH_LENGTH];
        //System.arraycopy(preprocessed, offset, h, 0, Hash.HASH_LENGTH);
        //router = new Hash(h);
        router = Hash.create(preprocessed, offset);
        offset += Hash.HASH_LENGTH;
    }
    if (fragmented) {