Mirror of https://github.com/go-i2p/reseed-tools.git (synced 2025-09-12 11:47:44 -04:00)
Compare commits
16 Commits
Commits included (SHA1):
8a196b6f05, abbe458a39, 07a53cbb2c, fff0db25ad, 62d78f62bd, 6facd10b43, 69ed590ed0, 81f8f37949,
068ae081ff, 57ecfe68ce, 7b78a9bc09, 0943238f79, 2bfd68a72c, 9669abd3d0, c25cf60449, 2ae7437d72
CHANGELOG.html — 215 lines

@@ -1,215 +0,0 @@  (file deleted)

The removed file was a static HTML changelog page titled "I2P Reseed Tools" (author "eyedeekay", description "reseed-tools", stylesheets style.css and showhider.css). Around the page boilerplate — a show/hide navigation menu linking to the parent directory, index, CHANGELOG, content/index.html, docs/index.html, docs/DEBIAN, docs/DOCKER, docs/EXAMPLES, docs/PLUGIN, docs/SERVICES, docs/TLS, and docs/index — it carried the following changelog entries:

2021-12-16
  * app.Version = "0.2.11"
  * include license file in plugin

2021-12-14
  * app.Version = "0.2.10"
  * restart changelog
  * fix websiteURL in plugin.config

2019-04-21
  * app.Version = "0.1.7"
  * enabling TLS 1.3 only

2016-12-21
  * deactivating previous random time delta, makes only sense when patching ri too
  * app.Version = "0.1.6"

2016-10-09
  * seed the math random generator with time.Now().UnixNano()
  * added 6h+6h random time delta at su3-age to increase anonymity
  * app.Version = "0.1.5"

2016-05-15
  * README.md updated
  * allowed routerInfos age increased from 96 to 192 hours
  * app.Version = "0.1.4"

2016-03-05
  * app.Version = "0.1.3"
  * CRL creation added

2016-01-31
  * allowed TLS ciphers updated (hardened)
  * TLS certificate generation: RSA 4096 -> ECDSAWithSHA512 384bit secp384r1
  * ECDHE handshake: only CurveP384 + CurveP521, default CurveP256 removed
  * TLS certificate valid: 2y -> 5y
  * throttled.PerDay(4) -> PerHour(4), to enable limited testing
  * su3 RebuildInterval: 24h -> 90h, higher anonymity for the running i2p-router
  * numRi per su3 file: 75 -> 77

2016-01
  * fork from https://i2pgit.org/idk/reseed-tools

The page also linked the source repository (https://i2pgit.org/idk/reseed-tools), reproduced the MIT license text (Copyright (c) 2014 Matt Drollette) behind a show/hide toggle, embedded the Tor Snowflake iframe (https://snowflake.torproject.org/embed.html), and linked to https://geti2p.net/ with the I2P logo.
cmd/diagnose.go — 351 lines

@@ -61,157 +61,232 @@ to prevent "mapping format violation" errors during reseed operations.`,

The monolithic diagnoseRouterInfoFiles — which previously read the CLI flags inline, validated the netDb path, compiled the RouterInfo filename regex, tracked five local counters, and did all per-file work (age check, read, parse, corruption handling, validity checks) inside one large filepath.WalkDir closure before printing the summary — is split into small helpers. The logic is moved, not changed.

The new top-level function and supporting types:

// diagnoseRouterInfoFiles performs the main diagnosis logic for RouterInfo files
func diagnoseRouterInfoFiles(ctx *cli.Context) error {
    config, err := extractDiagnosisConfig(ctx)
    if err != nil {
        return err
    }

    if err := validateNetDbPath(config.netdbPath); err != nil {
        return err
    }

    printDiagnosisHeader(config)

    routerInfoPattern, err := compileRouterInfoPattern()
    if err != nil {
        return err
    }

    stats := &diagnosisStats{}

    err = filepath.WalkDir(config.netdbPath, func(path string, d fs.DirEntry, err error) error {
        return processRouterInfoFile(path, d, err, routerInfoPattern, config, stats)
    })
    if err != nil {
        return fmt.Errorf("error walking netDb directory: %v", err)
    }

    printDiagnosisSummary(stats, config.removeBad)
    return nil
}

// diagnosisConfig holds all configuration parameters for diagnosis
type diagnosisConfig struct {
    netdbPath string
    maxAge    time.Duration
    removeBad bool
    verbose   bool
    debug     bool
}

// diagnosisStats tracks file processing statistics
type diagnosisStats struct {
    totalFiles     int
    tooOldFiles    int
    corruptedFiles int
    validFiles     int
    removedFiles   int
}

The extracted helpers each carry over the corresponding block of the old function body:

* extractDiagnosisConfig(ctx *cli.Context) (*diagnosisConfig, error) — reads the netdb, max-age, remove-bad, verbose, and debug flags, sets I2P_DEBUG=true (and prints "Debug mode enabled") when debug is requested, and errors out if no netDb path is given ("Use --netdb flag or ensure I2P is installed in a standard location").
* validateNetDbPath(netdbPath string) error — returns an error if the netDb directory does not exist.
* printDiagnosisHeader(config *diagnosisConfig) — prints the netDb path, maximum file age, and remove-bad setting.
* compileRouterInfoPattern() (*regexp.Regexp, error) — compiles `^routerInfo-[A-Za-z0-9-=~]+\.dat$`, wrapping any compile error.
* processRouterInfoFile(path, d, err, pattern, config, stats) error — the WalkDir callback body: logs access errors when verbose, skips directories and non-matching names, increments totalFiles, then defers to shouldSkipOldFile and analyzeRouterInfoFile.
* shouldSkipOldFile(path, d, config, stats) bool — compares time.Since(info.ModTime()) against maxAge, counting and (when verbose) logging skipped files.
* analyzeRouterInfoFile(path, config, stats) error — reads the file and calls router_info.ReadRouterInfo; parse failures go to handleCorruptedFile, successes to validateRouterInfo.
* handleCorruptedFile(path, parseErr, remainder, config, stats) error — prints the corruption details (including up to the first 50 bytes of leftover data when verbose), increments corruptedFiles, and, when --remove-bad is set, removes the file and counts the removal.
* validateRouterInfo(path, riStruct, config, stats) error — runs the same GoodVersion/Reachable/UnCongested checks the reseed server applies, counts the file as valid either way, and (when verbose) reports whether the reseed server would still skip it.
* printDiagnosisSummary(stats, removeBad) — prints the "=== DIAGNOSIS SUMMARY ===" block that used to sit at the end of diagnoseRouterInfoFiles, including the hint to re-run with --remove-bad when corrupted files were found.

@@ -219,8 +294,6 @@ func diagnoseRouterInfoFiles(ctx *cli.Context) error {

The trailing summary lines ("No corrupted RouterInfo files found. The parsing errors may be transient.") are kept verbatim inside printDiagnosisSummary; the old function's final blank line and "return nil" at this position are dropped, since the rewritten caller returns immediately after printDiagnosisSummary.
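For orientation, here is a minimal, self-contained sketch of the walk/filter/count pattern the refactor isolates. It uses only the standard library; the directory name, age threshold, and counter struct are hypothetical stand-ins, not the repository's actual helpers.

package main

import (
    "fmt"
    "io/fs"
    "path/filepath"
    "regexp"
    "time"
)

// counts mirrors the role of diagnosisStats: one counter per outcome.
type counts struct{ total, old, kept int }

func main() {
    pattern := regexp.MustCompile(`^routerInfo-[A-Za-z0-9-=~]+\.dat$`)
    maxAge := 192 * time.Hour // hypothetical threshold
    var c counts

    // Walk a hypothetical netDb directory; each entry is handled by one small callback.
    _ = filepath.WalkDir("./netDb", func(path string, d fs.DirEntry, err error) error {
        if err != nil || d.IsDir() || !pattern.MatchString(d.Name()) {
            return nil // skip access errors, directories, and non-RouterInfo files
        }
        c.total++
        if info, err := d.Info(); err == nil && time.Since(info.ModTime()) > maxAge {
            c.old++
            return nil
        }
        c.kept++ // a real diagnosis would parse and validate the file here
        return nil
    })
    fmt.Printf("total=%d too-old=%d kept=%d\n", c.total, c.old, c.kept)
}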
cmd/reseed.go — 520 lines

@@ -133,7 +133,7 @@ func NewReseedCommand() *cli.Command {
 		},
 		&cli.IntFlag{
 			Name:  "numRi",
-			Value: 25,
+			Value: 61,
 			Usage: "Number of routerInfos to include in each su3 file",
 		},
 		&cli.IntFlag{

The default number of routerInfos packed into each su3 file is raised from 25 to 61.
@@ -472,52 +472,78 @@ func setupI2PKeys(c *cli.Context, tlsConfig *tlsConfiguration) (i2pkeys.I2PKeys,

setupOnionKeys previously did everything inside a single `if c.Bool("onion")` block: stat/read or generate the onion key, derive the .onion hostname, write the key file, fill in default .pem/.crt paths, and (unless --trustProxy) call checkOrNewTLSCert. It now returns early when --onion is off and delegates to four new helpers; the behavior is effectively unchanged.

// loadOrGenerateOnionKey loads an existing onion key from file or generates a new one.
func loadOrGenerateOnionKey(keyPath string) ([]byte, error) {
    if _, err := os.Stat(keyPath); err == nil {
        key, err := ioutil.ReadFile(keyPath)
        if err != nil {
            return nil, err
        }
        return key, nil
    }

    key, err := ed25519.GenerateKey(nil)
    if err != nil {
        return nil, err
    }
    return []byte(key.PrivateKey()), nil
}

// configureOnionTlsHost sets up the onion TLS hostname if not already configured.
func configureOnionTlsHost(tlsConfig *tlsConfiguration, onionKey []byte) {
    if tlsConfig.onionTlsHost == "" {
        tlsConfig.onionTlsHost = torutil.OnionServiceIDFromPrivateKey(ed25519.PrivateKey(onionKey)) + ".onion"
    }
}

// configureOnionTlsPaths sets up default paths for TLS key and certificate files.
func configureOnionTlsPaths(tlsConfig *tlsConfiguration) {
    if tlsConfig.onionTlsKey == "" {
        tlsConfig.onionTlsKey = tlsConfig.onionTlsHost + ".pem"
    }

    if tlsConfig.onionTlsCert == "" {
        tlsConfig.onionTlsCert = tlsConfig.onionTlsHost + ".crt"
    }
}

// setupOnionTlsCertificate creates or validates TLS certificates for onion services.
func setupOnionTlsCertificate(c *cli.Context, tlsConfig *tlsConfiguration) error {
    if tlsConfig.onionTlsHost == "" {
        return nil
    }

    auto := c.Bool("yes")
    ignore := c.Bool("trustProxy")
    if !ignore {
        return checkOrNewTLSCert(tlsConfig.onionTlsHost, &tlsConfig.onionTlsCert, &tlsConfig.onionTlsKey, auto)
    }
    return nil
}

// setupOnionKeys configures Onion service keys and TLS certificates if Onion protocol is enabled.
func setupOnionKeys(c *cli.Context, tlsConfig *tlsConfiguration) error {
    if !c.Bool("onion") {
        return nil
    }

    onionKey, err := loadOrGenerateOnionKey(c.String("onionKey"))
    if err != nil {
        lgr.WithError(err).Fatal("Fatal error")
    }

    configureOnionTlsHost(tlsConfig, onionKey)

    err = ioutil.WriteFile(c.String("onionKey"), onionKey, 0o644)
    if err != nil {
        lgr.WithError(err).Fatal("Fatal error")
    }

    configureOnionTlsPaths(tlsConfig)

    err = setupOnionTlsCertificate(c, tlsConfig)
    if err != nil {
        lgr.WithError(err).Fatal("Fatal error")
    }

    return nil
}
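As an aside, the load-or-generate pattern is easy to reproduce with the standard library alone. Note that the repository uses a Tor-oriented ed25519 package whose GenerateKey returns a key-pair object with a PrivateKey() accessor, so the sketch below (crypto/ed25519, hypothetical file name and 0o600 permissions) only illustrates the pattern, not the exact key format used above.

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "fmt"
    "os"
)

// loadOrCreateKey returns the private key stored at path, creating and
// persisting a fresh one when the file does not exist yet.
func loadOrCreateKey(path string) (ed25519.PrivateKey, error) {
    if raw, err := os.ReadFile(path); err == nil {
        return ed25519.PrivateKey(raw), nil // reuse the existing key
    }
    _, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        return nil, err
    }
    // 0o600 because the key is a secret; the onion key file above uses 0o644.
    if err := os.WriteFile(path, priv, 0o600); err != nil {
        return nil, err
    }
    return priv, nil
}

func main() {
    key, err := loadOrCreateKey("example-onion.key") // hypothetical file name
    if err != nil {
        panic(err)
    }
    fmt.Println("key length:", len(key))
}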
@@ -760,45 +786,46 @@ func reseedI2PWithContext(ctx context.Context, c *cli.Context, i2pTlsCert, i2pTl

The body of startConfiguredServers — which previously created the context, WaitGroup, and buffered error channel itself and launched each protocol's goroutine inline — is broken out into one launcher per protocol. Each launcher returns immediately when its protocol is disabled, otherwise registers with the WaitGroup and reports failures through a non-blocking send on the shared error channel:

// startOnionServer launches the onion server in a goroutine if enabled.
func startOnionServer(ctx context.Context, c *cli.Context, tlsConfig *tlsConfiguration, reseeder *reseed.ReseederImpl, wg *sync.WaitGroup, errChan chan<- error) {
    if !c.Bool("onion") {
        return
    }

    wg.Add(1)
    go func() {
        defer wg.Done()
        lgr.WithField("service", "onion").Debug("Onion server starting")
        if err := reseedOnionWithContext(ctx, c, tlsConfig.onionTlsCert, tlsConfig.onionTlsKey, reseeder); err != nil {
            select {
            case errChan <- fmt.Errorf("onion server error: %w", err):
            default:
            }
        }
    }()
}

// startI2PServer launches the I2P server in a goroutine if enabled.
func startI2PServer(ctx context.Context, c *cli.Context, tlsConfig *tlsConfiguration, i2pkey i2pkeys.I2PKeys, reseeder *reseed.ReseederImpl, wg *sync.WaitGroup, errChan chan<- error) {
    if !c.Bool("i2p") {
        return
    }

    wg.Add(1)
    go func() {
        defer wg.Done()
        lgr.WithField("service", "i2p").Debug("I2P server starting")
        if err := reseedI2PWithContext(ctx, c, tlsConfig.i2pTlsCert, tlsConfig.i2pTlsKey, i2pkey, reseeder); err != nil {
            select {
            case errChan <- fmt.Errorf("i2p server error: %w", err):
            default:
            }
        }
    }()
}

// startHTTPServer launches the appropriate HTTP/HTTPS server in a goroutine.
func startHTTPServer(ctx context.Context, c *cli.Context, tlsConfig *tlsConfiguration, reseeder *reseed.ReseederImpl, wg *sync.WaitGroup, errChan chan<- error) {
    wg.Add(1)
    go func() {
        defer wg.Done()
        // ... (the HTTP/HTTPS dispatch body falls between the displayed hunks)

@@ -820,7 +847,18 @@ func startConfiguredServers(c *cli.Context, tlsConfig *tlsConfiguration, i2pkey

After the unchanged tail of startHTTPServer, two small coordination helpers are added:

// setupServerContext initializes the context and error handling infrastructure for server coordination.
func setupServerContext() (context.Context, context.CancelFunc, *sync.WaitGroup, chan error) {
    ctx, cancel := context.WithCancel(context.Background())
    var wg sync.WaitGroup
    errChan := make(chan error, 3) // Buffer for up to 3 server errors
    return ctx, cancel, &wg, errChan
}

// waitForServerCompletion coordinates server completion and error handling.
func waitForServerCompletion(wg *sync.WaitGroup, errChan chan error) {
    // Wait for first error or all servers to complete
    go func() {
        wg.Wait()
        // ... (the remainder of this function falls between the displayed hunks)
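A minimal, generic sketch of the coordination pattern these helpers share — each worker is optional, failures are reported through a buffered channel with a non-blocking send, and a WaitGroup closes the loop. Names and the simulated work are illustrative, not taken from the repository.

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

// startWorker launches one optional service; a non-blocking send keeps a full
// error channel from ever stalling the goroutine.
func startWorker(ctx context.Context, name string, enabled bool, wg *sync.WaitGroup, errChan chan<- error) {
    if !enabled {
        return
    }
    wg.Add(1)
    go func() {
        defer wg.Done()
        select {
        case <-ctx.Done():
            return
        case <-time.After(10 * time.Millisecond): // stand-in for real server work
            select {
            case errChan <- fmt.Errorf("%s stopped", name):
            default: // channel already full; drop this report
            }
        }
    }()
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    var wg sync.WaitGroup
    errChan := make(chan error, 3)

    startWorker(ctx, "onion", true, &wg, errChan)
    startWorker(ctx, "i2p", false, &wg, errChan)
    startWorker(ctx, "http", true, &wg, errChan)

    go func() { wg.Wait(); close(errChan) }()
    for err := range errChan {
        fmt.Println("worker error:", err)
    }
}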
@@ -833,184 +871,16 @@ func startConfiguredServers(c *cli.Context, tlsConfig *tlsConfiguration, i2pkey

startConfiguredServers shrinks from an inline implementation to a short coordinator built on the helpers above:

// startConfiguredServers starts all enabled server protocols (Onion, I2P, HTTP/HTTPS) with proper coordination.
func startConfiguredServers(c *cli.Context, tlsConfig *tlsConfiguration, i2pkey i2pkeys.I2PKeys, reseeder *reseed.ReseederImpl) {
    ctx, cancel, wg, errChan := setupServerContext()
    defer cancel()

    startOnionServer(ctx, c, tlsConfig, reseeder, wg, errChan)
    startI2PServer(ctx, c, tlsConfig, i2pkey, reseeder, wg, errChan)
    startHTTPServer(ctx, c, tlsConfig, reseeder, wg, errChan)

    waitForServerCompletion(wg, errChan)
}

The 184 removed lines were the four pre-context listener functions, now redundant alongside their *WithContext counterparts:

* reseedHTTPS(c, tlsCert, tlsKey, reseeder) and reseedHTTP(c, reseeder) — each built a reseed.NewServer(c.String("prefix"), c.Bool("trustProxy")), set RequestRateLimit/WebRateLimit from --ratelimit and --ratelimitweb, set Addr to ip:port, loaded an optional --blacklist file into a reseed.NewBlacklist, started the periodic memory-stats debug logger when --stats was non-zero, and then called ListenAndServeTLS or ListenAndServe, logging any error as fatal.
* reseedOnion(c, onionTlsCert, onionTlsKey, reseeder) — the same server/blacklist/stats setup, then incremented the configured port by one for the local listener, read the --onionKey file if present, and served through a tor.ListenConf (Version3: true, NonAnonymous: c.Bool("singleOnion"), DiscardKey: false): remote port 443 via ListenAndServeOnionTLS when onion TLS cert and key were configured, otherwise remote port 80 via ListenAndServeOnion; with no key file it listened on a freshly generated onion service on remote port 80.
* reseedI2P(c, i2pTlsCert, i2pTlsKey, i2pIdentKey, reseeder) — the same setup, then ListenAndServeI2PTLS or ListenAndServeI2P on the --samaddr SAM address (its final debug message still read "Onion server started", a leftover copy/paste in the removed code).
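The deleted functions each repeated the same periodic memory-statistics logger. A compact stand-alone version of that pattern (standard library only, with a plain log call instead of the repository's lgr logger, and a hypothetical interval in place of the --stats flag) looks roughly like this:

package main

import (
    "log"
    "runtime"
    "time"
)

// logMemStats periodically samples Go runtime memory statistics, mirroring the
// "print stats once in a while" goroutine the removed server functions shared.
func logMemStats(interval time.Duration) {
    go func() {
        var mem runtime.MemStats
        for range time.Tick(interval) {
            runtime.ReadMemStats(&mem)
            log.Printf("total_allocs_kb=%d allocs_kb=%d mallocs=%d num_gc=%d",
                mem.TotalAlloc/1024, mem.Alloc/1024, mem.Mallocs, mem.NumGC)
        }
    }()
}

func main() {
    logMemStats(2 * time.Second) // hypothetical interval; the CLI reads --stats
    time.Sleep(5 * time.Second)  // keep the demo alive long enough to see output
}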
@@ -1026,78 +896,112 @@ func getSupplementalNetDb(remote, password, path, samaddr string) {

downloadRemoteNetDB is split into four helpers, and the URL-normalization bug is fixed along the way: the old code called strings.HasPrefix("http://", remote) with the arguments reversed, so an existing scheme on remote was never detected and "http://" was prepended regardless. The new downloadAndSaveNetDB also closes the HTTP response body, which the old inline version never did.

// normalizeRemoteURL ensures the remote URL has proper HTTP protocol and netDb.tar.gz suffix.
func normalizeRemoteURL(remote string) (string, error) {
    var hremote string
    if !strings.HasPrefix(remote, "http://") && !strings.HasPrefix(remote, "https://") {
        hremote = "http://" + remote
    } else {
        hremote = remote
    }
    if !strings.HasSuffix(hremote, ".tar.gz") {
        hremote += "/netDb.tar.gz"
    }
    return hremote, nil
}

The remaining helpers carry the previous inline logic:

* createGarlicHTTPClient(samaddr, password) (*http.Client, *onramp.Garlic, error) — opens an onramp.NewGarlic("reseed-client", samaddr, onramp.OPT_WIDE) session and returns an http.Client whose Transport dials through it; the caller defers garlic.Close().
* downloadAndSaveNetDB(client, url, password) error — builds the request with the reseed-password and x-user-agent (reseed.I2pUserAgent) headers, performs it, closes the response body, and writes the payload to netDb.tar.gz with mode 0o644.
* extractAndCopyNetDB(path) error — untars netDb.tar.gz into <path>/reseed-netDb, copies it into path with a copy.Options Skip callback that ignores RouterInfo files whose base name already exists at the destination (logging "Ignoring existing RI"), then removes the temporary directory and the archive.

The rewritten entry point composes them:

func downloadRemoteNetDB(remote, password, path, samaddr string) error {
    hremote, err := normalizeRemoteURL(remote)
    if err != nil {
        return err
    }

    url, err := url.Parse(hremote)
    if err != nil {
        return err
    }

    client, garlic, err := createGarlicHTTPClient(samaddr, password)
    if err != nil {
        return err
    }
    defer garlic.Close()

    if err := downloadAndSaveNetDB(client, url, password); err != nil {
        return err
    }

    return extractAndCopyNetDB(path)
}
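The argument-order fix is easy to miss, so here is a tiny self-contained check (standard library only; the example URL is illustrative) showing why the old call never detected an existing scheme:

package main

import (
    "fmt"
    "strings"
)

func main() {
    remote := "https://reseed.example.i2p" // hypothetical reseed host

    // Old, buggy order: asks whether the literal "http://" begins with remote.
    buggy := strings.HasPrefix("http://", remote) // false, so "http://" was prepended even to an https URL
    // Fixed order: asks whether remote begins with a scheme.
    fixed := strings.HasPrefix(remote, "http://") || strings.HasPrefix(remote, "https://")

    fmt.Println(buggy, fixed) // false true
}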
cmd/utils.go — 11 lines

@@ -144,8 +144,17 @@ func checkAcmeCertificateRenewal(tlsCert, tlsKey *string, tlsHost, signer, cadir
 		return false, err
 	}

+	// Parse the certificate to populate the Leaf field if it's nil
+	if tlsConfig.Certificates[0].Leaf == nil && len(tlsConfig.Certificates[0].Certificate) > 0 {
+		cert, err := x509.ParseCertificate(tlsConfig.Certificates[0].Certificate[0])
+		if err != nil {
+			return false, fmt.Errorf("failed to parse certificate: %w", err)
+		}
+		tlsConfig.Certificates[0].Leaf = cert
+	}
+
 	// Check if certificate expires within 48 hours (time until expiration < 48 hours)
-	if time.Until(tlsConfig.Certificates[0].Leaf.NotAfter) < (time.Hour * 48) {
+	if tlsConfig.Certificates[0].Leaf != nil && time.Until(tlsConfig.Certificates[0].Leaf.NotAfter) < (time.Hour*48) {
 		return renewExistingAcmeCertificate(tlsHost, signer, cadirurl, tlsCert, tlsKey)
 	}

This fixes a nil-pointer dereference in the ACME renewal check: the loaded certificate's Leaf field may be nil, so the expiry comparison now parses the first DER certificate into Leaf when needed and guards the comparison with a nil check.
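For context, a minimal sketch of the standard-library behavior the fix works around: tls.LoadX509KeyPair on Go releases before 1.23 leaves Certificate.Leaf nil (newer releases populate it), so an expiry check must parse the DER bytes first. The file names below are placeholders for any PEM certificate/key pair.

package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "time"
)

func main() {
    // Placeholder paths; any matching PEM certificate and key will do.
    pair, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
    if err != nil {
        panic(err)
    }

    // On older Go releases pair.Leaf is nil here, so pair.Leaf.NotAfter would panic.
    if pair.Leaf == nil && len(pair.Certificate) > 0 {
        leaf, err := x509.ParseCertificate(pair.Certificate[0]) // DER bytes of the first cert
        if err != nil {
            panic(err)
        }
        pair.Leaf = leaf
    }

    fmt.Println("renew soon:", pair.Leaf != nil && time.Until(pair.Leaf.NotAfter) < 48*time.Hour)
}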
The next two hunks extend the package's test file (its file-header row is missing from this capture):

@@ -6,7 +6,10 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"crypto/x509/pkix"
+	"encoding/pem"
 	"math/big"
+	"os"
+	"strings"
 	"testing"
 	"time"
 )

@@ -142,3 +145,139 @@ func TestOldBuggyLogic(t *testing.T) {

After the existing TestOldBuggyLogic, three functions are appended:

* TestNilPointerDereferenceTLSRenewal ("Test for Bug #1: Nil Pointer Dereference in TLS Certificate Renewal") — generates a certificate/key pair with generateTestCertificate, writes them to test-cert.pem and test-key.pem (removed again via defer), loads them with tls.LoadX509KeyPair, and logs "Confirmed: tlsCert.Leaf is nil after LoadX509KeyPair - this causes the bug" when Leaf is nil. With a recover() handler installed, it then evaluates time.Until(tlsCert.Leaf.NotAfter) < (time.Hour * 48) — reproducing the panic from line 147 of utils.go before the fix, and logging "No panic occurred - fix may be already applied" otherwise.
* generateTestCertificate() ([]byte, []byte, error) — builds a self-signed RSA-2048 certificate for DNS name test.example.com (Subject "Test Org"/"Test City"/US, serial 1, KeyUsageKeyEncipherment|KeyUsageDigitalSignature, ExtKeyUsageServerAuth) that expires in 24 hours so it falls inside the renewal window, and returns it PEM-encoded as "CERTIFICATE" and "RSA PRIVATE KEY" blocks.
* TestCertificateLeafParsingFix ("Test for Bug #1 Fix: Certificate Leaf parsing works correctly") — writes the generated pair to test-cert-fix.pem and test-key-fix.pem, calls checkAcmeCertificateRenewal(&certFile, &keyFile, "test", "test", "https://acme-v02.api.letsencrypt.org/directory"), and fails only if the returned error contains "runtime error" or "nil pointer"; an ACME-related error is expected and accepted, and the result is logged as "Fix successful: no nil pointer errors".
go.mod — 6 lines

@@ -7,9 +7,9 @@ require (
 	github.com/eyedeekay/go-i2pd v0.0.0-20220213070306-9807541b2dfc
 	github.com/eyedeekay/unembed v0.0.0-20230123014222-9916b121855b
 	github.com/go-acme/lego/v4 v4.3.1
-	github.com/go-i2p/checki2cp v0.0.0-20250223011251-79201ef39571
+	github.com/go-i2p/checki2cp v0.0.0-20250819201001-7a3f89fafac8
 	github.com/go-i2p/common v0.0.0-20250819190749-01946d9f7ccf
-	github.com/go-i2p/i2pkeys v0.33.10-0.20241113193422-e10de5e60708
+	github.com/go-i2p/i2pkeys v0.33.92
 	github.com/go-i2p/logger v0.0.0-20241123010126-3050657e5d0c
 	github.com/go-i2p/onramp v0.33.92
 	github.com/go-i2p/sam3 v0.33.92

@@ -49,7 +49,7 @@ require (
 	golang.org/x/crypto v0.39.0 // indirect
 	golang.org/x/net v0.41.0 // indirect
 	golang.org/x/sync v0.15.0 // indirect
-	golang.org/x/sys v0.33.0 // indirect
+	golang.org/x/sys v0.35.0 // indirect
 	gopkg.in/square/go-jose.v2 v2.5.1 // indirect
 )

Dependency bumps: go-i2p/checki2cp to its 2025-08-19 pseudo-version, go-i2p/i2pkeys to v0.33.92 (matching onramp and sam3), and golang.org/x/sys to v0.35.0.
go.sum — 12 lines

@@ -124,15 +124,15 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm

Only the checksum lines for the bumped modules change; the surrounding entries are untouched:

-github.com/go-i2p/checki2cp v0.0.0-20250223011251-79201ef39571 h1:l/mJzTbwzgycCvv6rGdgGERQleR1J6SpZJ6LZr5yCz4=
-github.com/go-i2p/checki2cp v0.0.0-20250223011251-79201ef39571/go.mod h1:h2Ufc73Qvj+KTkOz6H+JSS4XA7fM/Smqp593daAQNOc=
+github.com/go-i2p/checki2cp v0.0.0-20250819201001-7a3f89fafac8 h1:gOYWzWZKSSSeO6VendtDyEuTvR4WKxD5NLIxknDfLB8=
+github.com/go-i2p/checki2cp v0.0.0-20250819201001-7a3f89fafac8/go.mod h1:h2Ufc73Qvj+KTkOz6H+JSS4XA7fM/Smqp593daAQNOc=
-github.com/go-i2p/i2pkeys v0.33.10-0.20241113193422-e10de5e60708 h1:Tiy9IBwi21maNpK74yCdHursJJMkyH7w87tX1nXGWzg=
-github.com/go-i2p/i2pkeys v0.33.10-0.20241113193422-e10de5e60708/go.mod h1:m5TlHjPZrU5KbTd7Lr+I2rljyC6aJ88HdkeMQXV0U0E=
+github.com/go-i2p/i2pkeys v0.33.92 h1:e2vx3vf7tNesaJ8HmAlGPOcfiGM86jzeIGxh27I9J2Y=
+github.com/go-i2p/i2pkeys v0.33.92/go.mod h1:BRURQ/twxV0WKjZlFSKki93ivBi+MirZPWudfwTzMpE=

@@ -613,8 +613,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w

-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
index.html (deleted, 302 lines)
@@ -1,302 +0,0 @@
<html>
<head>
<title>
I2P Reseed Tools
</title>
<meta name="author" content="eyedeekay" />
<meta name="description" content="reseed-tools" />
<meta name="keywords" content="master" />
<link rel="stylesheet" type="text/css" href="style.css" />
<link rel="stylesheet" type="text/css" href="showhider.css" />
</head>
<body>
<div id="navbar">
<a href="#shownav">
Show navigation
</a>
<div id="shownav">
<div id="hidenav">
<ul>
<li>
<a href="..">
Up one level ^
</a>
</li>
<li>
<a href="index.html">
index
</a>
</li>
<li>
<a href="CHANGELOG.html">
CHANGELOG
</a>
</li>
<li>
<a href="content/index.html">
content/index.html
</a>
</li>
<li>
<a href="docs/index.html">
docs/index.html
</a>
</li>
<li>
<a href="index.html">
index.html
</a>
</li>
<li>
<a href="docs/DEBIAN.html">
docs/DEBIAN
</a>
</li>
<li>
<a href="docs/DOCKER.html">
docs/DOCKER
</a>
</li>
<li>
<a href="docs/EXAMPLES.html">
docs/EXAMPLES
</a>
</li>
<li>
<a href="docs/PLUGIN.html">
docs/PLUGIN
</a>
</li>
<li>
<a href="docs/index.html">
docs/index
</a>
</li>
<li>
<a href="docs/SERVICES.html">
docs/SERVICES
</a>
</li>
<li>
<a href="docs/TLS.html">
docs/TLS
</a>
</li>
<li>
<a href="docs/index.html">
docs/index.html
</a>
</li>
</ul>
<br>
<a href="#hidenav">
Hide Navigation
</a>
</div>
</div>
</div>
<a id="returnhome" href="/">
/
</a>
<h1>
I2P Reseed Tools
</h1>
<p>
<img src="content/images/reseed.png" alt="Reseed Tools Poster" />
</p>
<p>
This tool provides a secure and efficient reseed server for the I2P network.
There are several utility commands to create, sign, and validate SU3 files.
Please note that this requires at least Go version 1.13, and uses Go Modules.
</p>
<p>
Standard reseeds are distributed with the I2P packages. To get your reseed
included, apply on
<a href="http://zzz.i2p">
zzz.i2p
</a>
.
</p>
<h2>
Dependencies
</h2>
<p>
<code>
go
</code>
,
<code>
git
</code>
, and optionally
<code>
make
</code>
are required to build the project.
Precompiled binaries for most platforms are available at my github mirror
<a href="https://github.com/go-i2p/reseed-tools">
https://github.com/go-i2p/reseed-tools
</a>
.
</p>
<p>
In order to install the build-dependencies on Ubuntu or Debian, you may use:
</p>
<pre><code class="language-sh">sudo apt-get install golang-go git make
</code></pre>
<h2>
Installation
</h2>
<p>
Reseed-tools can be run as a user, as a freestanding service, or be installed
as an I2P Plugin. It will attempt to configure itself automatically. You should
make sure to set the
<code>
--signer
</code>
flag or the
<code>
RESEED_EMAIL
</code>
environment variable
to configure your signing keys/contact info.
</p>
<h3>
Installation(From Source)
</h3>
<pre><code>git clone https://i2pgit.org/idk/reseed-tools
cd reseed-tools
make build
# Optionally, if you want to install to /usr/bin/reseed-tools
sudo make install
</code></pre>
<h2>
Usage
</h2>
<h4>
Debian/Ubuntu note:
</h4>
<p>
It is possible to create a
<code>
.deb
</code>
package using
<a href="docs/DEBIAN.md">
these instructions
</a>
.
</p>
<p>
Debian users who are running I2P as a system service must also run the
<code>
reseed-tools
</code>
as the same user. This is so that the reseed-tools can access
the I2P service’s netDb directory. On Debian and Ubuntu, that user is
<code>
i2psvc
</code>
and the netDb directory is:
<code>
/var/lib/i2p/i2p-config/netDb
</code>
.
</p>
<h2>
Example Commands:
</h2>
<h3>
Without a webserver, standalone with TLS support
</h3>
<p>
If this is your first time running a reseed server (ie. you don’t have any existing keys),
you can simply run the command and follow the prompts to create the appropriate keys, crl and certificates.
Afterwards an HTTPS reseed server will start on the default port and generate 6 files in your current directory
(a TLS key, certificate and crl, and a su3-file signing key, certificate and crl).
</p>
<pre><code>reseed-tools reseed --signer=you@mail.i2p --netdb=/home/i2p/.i2p/netDb --tlsHost=your-domain.tld
</code></pre>
<h3>
Locally behind a webserver (reverse proxy setup), preferred:
</h3>
<p>
If you are using a reverse proxy server it may provide the TLS certificate instead.
</p>
<pre><code>reseed-tools reseed --signer=you@mail.i2p --netdb=/home/i2p/.i2p/netDb --port=8443 --ip=127.0.0.1 --trustProxy
</code></pre>
<ul>
<li>
<strong>
Usage
</strong>
<a href="docs/EXAMPLES.md">
More examples can be found here.
</a>
</li>
<li>
<strong>
Docker
</strong>
<a href="docs/DOCKER.md">
Docker examples can be found here
</a>
</li>
</ul>
<div id="sourcecode">
<span id="sourcehead">
<strong>
Get the source code:
</strong>
</span>
<ul>
<li>
<a href="https://i2pgit.org/idk/reseed-tools">
Source Repository: (https://i2pgit.org/idk/reseed-tools)
</a>
</li>
</ul>
</div>
<div>
<a href="#show">
Show license
</a>
<div id="show">
<div id="hide">
<pre><code>Copyright (c) 2014 Matt Drollette

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
</code></pre>
<a href="#hide">
Hide license
</a>
</div>
</div>
</div>
<div>
<iframe src="https://snowflake.torproject.org/embed.html" width="320" height="240" frameborder="0" scrolling="no"></iframe>
</div>
<div>
<a href="https://geti2p.net/">
<img src="i2plogo.png"></img>
I2P
</a>
</div>
</body>
</html>
@@ -3,7 +3,7 @@ package reseed
 // Version defines the current release version of the reseed-tools application.
 // This version string is used for compatibility checking, update notifications,
 // and identifying the software version in server responses and logs.
-const Version = "0.3.6"
+const Version = "0.3.10"

 // HTTP User-Agent constants for I2P protocol compatibility
 const (
@@ -215,7 +215,7 @@ func (srv *Server) reseedHandler(w http.ResponseWriter, r *http.Request) {

	su3Bytes, err := srv.Reseeder.PeerSu3Bytes(peer)
	if nil != err {
-		lgr.WithError(err).WithField("peer", peer).Error("Error serving su3 %s", err)
+		lgr.WithError(err).WithField("peer", peer).Errorf("Error serving su3 %s", err)
		http.Error(w, "500 Unable to serve su3", http.StatusInternalServerError)
		return
	}
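The one-line change above swaps Error for Errorf so that the %s verb in the message is actually expanded. With a logrus-style entry, Error concatenates its arguments without interpreting format verbs, while Errorf runs them through fmt.Sprintf. A minimal sketch of the difference, using sirupsen/logrus as a stand-in for the project's own lgr (an assumption for illustration only):

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	lgr := logrus.New()
	err := errors.New("broken pipe")

	// Error does not interpret format verbs: a literal "%s" lands in the log message.
	lgr.WithError(err).Error("Error serving su3 %s", err)

	// Errorf formats the message, so the error text is substituted for %s.
	lgr.WithError(err).Errorf("Error serving su3 %s", err)
}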
reseed/server_tokens_test.go (new file, 209 lines)
@@ -0,0 +1,209 @@
package reseed

import (
	"testing"
	"time"
)

// Test for Bug #3: Unbounded Memory Growth in Acceptable Tokens (FIXED)
func TestAcceptableTokensMemoryBounds(t *testing.T) {
	server := &Server{}

	// Test 1: Verify tokens are cleaned up after expiration
	t.Run("ExpiredTokenCleanup", func(t *testing.T) {
		// Create some tokens and artificially age them
		server.acceptables = make(map[string]time.Time)
		oldTime := time.Now().Add(-5 * time.Minute) // Older than 4-minute expiry
		recentTime := time.Now()

		server.acceptables["old_token_1"] = oldTime
		server.acceptables["old_token_2"] = oldTime
		server.acceptables["recent_token"] = recentTime

		if len(server.acceptables) != 3 {
			t.Errorf("Expected 3 tokens initially, got %d", len(server.acceptables))
		}

		// Trigger cleanup by calling Acceptable
		_ = server.Acceptable()

		// Check that old tokens were cleaned up but recent one remains
		if len(server.acceptables) > 2 {
			t.Errorf("Expected at most 2 tokens after cleanup, got %d", len(server.acceptables))
		}

		// Verify recent token still exists
		if _, exists := server.acceptables["recent_token"]; !exists {
			t.Error("Recent token should not have been cleaned up")
		}

		// Verify old tokens were removed
		if _, exists := server.acceptables["old_token_1"]; exists {
			t.Error("Old token should have been cleaned up")
		}
	})

	// Test 2: Verify size-based eviction when too many tokens
	t.Run("SizeBasedEviction", func(t *testing.T) {
		server.acceptables = make(map[string]time.Time)

		// Add more than 50 tokens
		for i := 0; i < 60; i++ {
			token := server.Acceptable()
			// Ensure each token has a slightly different timestamp
			time.Sleep(1 * time.Millisecond)
			if token == "" {
				t.Error("Acceptable() should return a valid token")
			}
		}

		// Should be limited to around 50 tokens due to eviction
		if len(server.acceptables) > 55 {
			t.Errorf("Expected token count to be limited, got %d", len(server.acceptables))
		}
	})

	// Test 3: Verify token validation works correctly
	t.Run("TokenValidation", func(t *testing.T) {
		server.acceptables = make(map[string]time.Time)

		// Generate a token
		token := server.Acceptable()
		if token == "" {
			t.Fatal("Expected valid token")
		}

		// Verify token is valid
		if !server.CheckAcceptable(token) {
			t.Error("Token should be valid immediately after creation")
		}

		// Verify token is consumed (single-use)
		if server.CheckAcceptable(token) {
			t.Error("Token should not be valid after first use")
		}

		// Verify invalid token returns false
		if server.CheckAcceptable("invalid_token") {
			t.Error("Invalid token should return false")
		}
	})

	// Test 4: Verify memory doesn't grow unboundedly
	t.Run("UnboundedGrowthPrevention", func(t *testing.T) {
		server.acceptables = make(map[string]time.Time)

		// Generate many tokens without checking them
		// This was the original bug scenario
		for i := 0; i < 200; i++ {
			_ = server.Acceptable()
		}

		// Memory should be bounded
		if len(server.acceptables) > 60 {
			t.Errorf("Memory growth not properly bounded: %d tokens", len(server.acceptables))
		}

		t.Logf("Token map size after 200 generations: %d (should be bounded)", len(server.acceptables))
	})

	// Test 5: Test concurrent access safety
	t.Run("ConcurrentAccess", func(t *testing.T) {
		server.acceptables = make(map[string]time.Time)

		// Launch multiple goroutines generating and checking tokens
		done := make(chan bool, 4)

		// Token generators
		go func() {
			for i := 0; i < 50; i++ {
				_ = server.Acceptable()
			}
			done <- true
		}()

		go func() {
			for i := 0; i < 50; i++ {
				_ = server.Acceptable()
			}
			done <- true
		}()

		// Token checkers
		go func() {
			for i := 0; i < 25; i++ {
				token := server.Acceptable()
				_ = server.CheckAcceptable(token)
			}
			done <- true
		}()

		go func() {
			for i := 0; i < 25; i++ {
				token := server.Acceptable()
				_ = server.CheckAcceptable(token)
			}
			done <- true
		}()

		// Wait for all goroutines to complete
		for i := 0; i < 4; i++ {
			<-done
		}

		// Should not panic and should have bounded size
		if len(server.acceptables) > 100 {
			t.Errorf("Concurrent access resulted in unbounded growth: %d tokens", len(server.acceptables))
		}

		t.Logf("Token map size after concurrent access: %d", len(server.acceptables))
	})
}

// Test the cleanup methods directly
func TestTokenCleanupMethods(t *testing.T) {
	server := &Server{
		acceptables: make(map[string]time.Time),
	}

	// Test cleanupExpiredTokensUnsafe
	t.Run("CleanupExpired", func(t *testing.T) {
		now := time.Now()
		server.acceptables["expired1"] = now.Add(-5 * time.Minute)
		server.acceptables["expired2"] = now.Add(-6 * time.Minute)
		server.acceptables["valid"] = now

		server.cleanupExpiredTokensUnsafe()

		if len(server.acceptables) != 1 {
			t.Errorf("Expected 1 token after cleanup, got %d", len(server.acceptables))
		}

		if _, exists := server.acceptables["valid"]; !exists {
			t.Error("Valid token should remain after cleanup")
		}
	})

	// Test evictOldestTokensUnsafe
	t.Run("EvictOldest", func(t *testing.T) {
		server.acceptables = make(map[string]time.Time)
		now := time.Now()

		// Add tokens with different timestamps
		for i := 0; i < 10; i++ {
			server.acceptables[string(rune('a'+i))] = now.Add(time.Duration(-i) * time.Minute)
		}

		// Evict to keep only 5
		server.evictOldestTokensUnsafe(5)

		if len(server.acceptables) != 5 {
			t.Errorf("Expected 5 tokens after eviction, got %d", len(server.acceptables))
		}

		// The newest tokens should remain
		if _, exists := server.acceptables["a"]; !exists {
			t.Error("Newest token should remain after eviction")
		}
	})
}
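The tests above exercise Acceptable, CheckAcceptable, cleanupExpiredTokensUnsafe and evictOldestTokensUnsafe, but the implementations themselves are not part of this diff. Below is a minimal, self-contained sketch of a token store that would satisfy the behavior the tests assume (a roughly 4-minute expiry, a soft cap of about 50 entries, single-use tokens, and a mutex guarding the map). All names here other than the behavior described by the tests are illustrative, not the project's actual code:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"sync"
	"time"
)

const (
	tokenTTL = 4 * time.Minute // tokens older than this are treated as expired
	tokenCap = 50              // soft cap before the oldest tokens are evicted
)

// tokenStore is an illustrative, bounded, single-use token registry.
type tokenStore struct {
	mu     sync.Mutex
	tokens map[string]time.Time
}

func newTokenStore() *tokenStore {
	return &tokenStore{tokens: make(map[string]time.Time)}
}

// Acceptable issues a fresh token, dropping expired entries and evicting the
// oldest ones first so the map cannot grow without bound.
func (s *tokenStore) Acceptable() string {
	s.mu.Lock()
	defer s.mu.Unlock()

	now := time.Now()
	for tok, issued := range s.tokens {
		if now.Sub(issued) > tokenTTL {
			delete(s.tokens, tok)
		}
	}
	for len(s.tokens) >= tokenCap {
		var oldestTok string
		var oldestAt time.Time
		first := true
		for tok, issued := range s.tokens {
			if first || issued.Before(oldestAt) {
				oldestTok, oldestAt = tok, issued
				first = false
			}
		}
		delete(s.tokens, oldestTok)
	}

	buf := make([]byte, 16)
	_, _ = rand.Read(buf) // error ignored for brevity in this sketch
	tok := hex.EncodeToString(buf)
	s.tokens[tok] = now
	return tok
}

// CheckAcceptable consumes a token: it is valid exactly once, and only while unexpired.
func (s *tokenStore) CheckAcceptable(tok string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	issued, ok := s.tokens[tok]
	if !ok {
		return false
	}
	delete(s.tokens, tok)
	return time.Since(issued) <= tokenTTL
}

func main() {
	s := newTokenStore()
	tok := s.Acceptable()
	fmt.Println(s.CheckAcceptable(tok)) // true
	fmt.Println(s.CheckAcceptable(tok)) // false: single-use
}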
@@ -215,10 +215,16 @@ func (rs *ReseederImpl) PeerSu3Bytes(peer Peer) ([]byte, error) {
	m := rs.su3s.Load().([][]byte)

	if len(m) == 0 {
-		return nil, errors.New("404")
+		return nil, errors.New("502: Internal service error, no reseed file available")
	}

-	return m[peer.Hash()%len(m)], nil
+	// Additional safety: ensure index is valid (defense in depth)
+	index := int(peer.Hash()) % len(m)
+	if index < 0 || index >= len(m) {
+		return nil, errors.New("404: Reseed file not found")
+	}
+
+	return m[index], nil
}

func (rs *ReseederImpl) createSu3(seeds []routerInfo) (*su3.File, error) {
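The extra index < 0 guard matters because Go's remainder operator takes the sign of its dividend: if the converted hash ever comes out negative, the modulo result is negative and indexing the slice with it would panic. A standalone illustration of that semantics (not project code):

package main

import "fmt"

func main() {
	hash := -7
	buckets := 3
	// In Go, % follows the sign of the dividend, so this prints -1, not 2.
	// Indexing a slice with that value would panic, hence the explicit bounds check.
	fmt.Println(hash % buckets)
}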
@@ -56,10 +56,10 @@ func TestLocalNetDb_ConfigurableRouterInfoAge(t *testing.T) {
			description:   "Should include files up to 72 hours old",
		},
		{
-			name:          "192 hour limit (current default)",
+			name:          "192 hour limit (legacy compatibility)",
			maxAge:        192 * time.Hour,
			expectedFiles: 4, // All files should be included
-			description:   "Should include files up to 192 hours old",
+			description:   "Should include files up to 192 hours old (for backwards compatibility)",
		},
		{
			name: "36 hour limit (strict)",
@@ -100,8 +100,8 @@ func TestLocalNetDb_DefaultValues(t *testing.T) {

	// Test with different duration values
	testDurations := []time.Duration{
-		72 * time.Hour,  // 3 days (I2P standard)
+		72 * time.Hour,  // 3 days (I2P standard default)
-		192 * time.Hour, // 8 days (old default)
+		192 * time.Hour, // 8 days (legacy compatibility)
		24 * time.Hour,     // 1 day (strict)
		7 * 24 * time.Hour, // 1 week
	}
@@ -116,3 +116,145 @@
		})
	}
}
+
+// Test for Bug #2: Race Condition in SU3 Cache Access
+func TestSU3CacheRaceCondition(t *testing.T) {
+	// Create a mock netdb that will fail during RouterInfos() call
+	tempDir, err := os.MkdirTemp("", "netdb_test_race")
+	if err != nil {
+		t.Fatalf("Failed to create temp dir: %v", err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	// Create a minimal netdb with no router files (this will cause rebuild to fail)
+	netdb := NewLocalNetDb(tempDir, 72*time.Hour)
+	reseeder := NewReseeder(netdb)
+
+	// Mock peer for testing
+	peer := Peer("testpeer")
+
+	// Test 1: Empty cache (should return 404, not panic)
+	_, err = reseeder.PeerSu3Bytes(peer)
+	if err == nil {
+		t.Error("Expected error when cache is empty, got nil")
+	} else if err.Error() != "404" {
+		t.Logf("Got expected error: %v", err)
+	}
+
+	// Test 2: Simulate the actual race condition where atomic.Value
+	// might briefly hold an empty slice during rebuild
+	// Force an empty slice into the cache to simulate the race
+	reseeder.su3s.Store([][]byte{})
+
+	// This should also return 404, not panic
+	_, err = reseeder.PeerSu3Bytes(peer)
+	if err == nil {
+		t.Error("Expected error when cache is forcibly emptied, got nil")
+	} else if err.Error() != "404" {
+		t.Logf("Got expected error for empty cache: %v", err)
+	}
+
+	// Test 3: The race condition might also be about concurrent access
+	// Let's test if we can make it panic with specific timing
+	for i := 0; i < 100; i++ {
+		// Simulate rapid cache updates that might leave empty slices briefly
+		go func() {
+			reseeder.su3s.Store([][]byte{})
+		}()
+		go func() {
+			_, _ = reseeder.PeerSu3Bytes(peer)
+		}()
+	}
+
+	t.Log("Race condition test completed - if we reach here, no panic occurred")
+
+	// Test 4: Additional bounds checking (the actual fix)
+	// Verify our bounds check works even in edge cases
+	testSlice := [][]byte{
+		[]byte("su3-file-1"),
+		[]byte("su3-file-2"),
+	}
+	reseeder.su3s.Store(testSlice)
+
+	// This should work normally
+	result, err := reseeder.PeerSu3Bytes(peer)
+	if err != nil {
+		t.Errorf("Unexpected error with valid cache: %v", err)
+	}
+	if result == nil {
+		t.Error("Expected su3 bytes, got nil")
+	}
+}
+
+// Test for Bug #2 Fix: Improved bounds checking in SU3 cache access
+func TestSU3BoundsCheckingFix(t *testing.T) {
+	tempDir, err := os.MkdirTemp("", "netdb_test_bounds")
+	if err != nil {
+		t.Fatalf("Failed to create temp dir: %v", err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	netdb := NewLocalNetDb(tempDir, 72*time.Hour)
+	reseeder := NewReseeder(netdb)
+	peer := Peer("testpeer")
+
+	// Test with valid non-empty cache
+	validCache := [][]byte{
+		[]byte("su3-file-1"),
+		[]byte("su3-file-2"),
+		[]byte("su3-file-3"),
+	}
+	reseeder.su3s.Store(validCache)
+
+	// This should work correctly
+	result, err := reseeder.PeerSu3Bytes(peer)
+	if err != nil {
+		t.Errorf("Unexpected error with valid cache: %v", err)
+	}
+	if result == nil {
+		t.Error("Expected su3 bytes, got nil")
+	}
+
+	// Verify we get one of the expected results
+	found := false
+	for _, expected := range validCache {
+		if string(result) == string(expected) {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Error("Result not found in expected su3 cache")
+	}
+
+	t.Log("Bounds checking fix verified - proper access to su3 cache")
+}
+
+// Test for Bug #4 Fix: Verify CLI default matches I2P standard (72 hours)
+func TestRouterAgeDefaultConsistency(t *testing.T) {
+	// This test documents that the CLI default of 72 hours is the I2P standard
+	// and ensures consistency between documentation and implementation
+
+	defaultAge := 72 * time.Hour
+
+	tempDir, err := os.MkdirTemp("", "netdb_test_default")
+	if err != nil {
+		t.Fatalf("Failed to create temp dir: %v", err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	// Test that when we use the documented default (72h), it works as expected
+	netdb := NewLocalNetDb(tempDir, defaultAge)
+
+	if netdb.MaxRouterInfoAge != defaultAge {
+		t.Errorf("Expected MaxRouterInfoAge to be %v (I2P standard), got %v", defaultAge, netdb.MaxRouterInfoAge)
+	}
+
+	// Verify this matches what the CLI flag shows as default
+	expectedDefault := 72 * time.Hour
+	if netdb.MaxRouterInfoAge != expectedDefault {
+		t.Errorf("Router age default inconsistency: expected %v (CLI default), got %v", expectedDefault, netdb.MaxRouterInfoAge)
+	}
+
+	t.Logf("Router age default correctly set to %v (I2P standard)", netdb.MaxRouterInfoAge)
+}
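These tests only construct NewLocalNetDb(dir, maxAge) and read back MaxRouterInfoAge; the filtering itself is not shown in this hunk. The sketch below illustrates how a MaxRouterInfoAge cutoff is typically applied when walking a netDb directory, using file modification times. It is illustrative only: the routerInfo- filename prefix, the helper name, and the directory path (taken from the Debian note above) are assumptions, not the project's actual implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// listRecentRouterInfos returns routerInfo files under dir that were modified
// within maxAge, mirroring the age cutoff the tests above expect.
func listRecentRouterInfos(dir string, maxAge time.Duration) ([]string, error) {
	cutoff := time.Now().Add(-maxAge)
	var files []string
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() || !strings.HasPrefix(info.Name(), "routerInfo-") {
			return nil
		}
		if info.ModTime().After(cutoff) {
			files = append(files, path)
		}
		return nil
	})
	return files, err
}

func main() {
	files, err := listRecentRouterInfos("/var/lib/i2p/i2p-config/netDb", 72*time.Hour)
	if err != nil {
		fmt.Println("walk failed:", err)
		return
	}
	fmt.Printf("%d routerInfos newer than 72h\n", len(files))
}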
reseed/user_agent_test.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package reseed

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// Test for Bug #6: User Agent String Mismatch with I2P Compatibility
// This test verifies that the server strictly enforces the exact I2P user agent
// Only "Wget/1.11.4" is allowed - no other versions or variations
func TestUserAgentCompatibility(t *testing.T) {
	// Create a simple handler that just returns OK
	testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("OK"))
	})

	// Wrap with our verification middleware
	handler := verifyMiddleware(testHandler)

	testCases := []struct {
		name           string
		userAgent      string
		expectedStatus int
		description    string
	}{
		{
			name:           "Exact match (current behavior)",
			userAgent:      "Wget/1.11.4",
			expectedStatus: http.StatusOK,
			description:    "Should accept the exact expected user agent",
		},
		{
			name:           "Newer wget version",
			userAgent:      "Wget/1.12.0",
			expectedStatus: http.StatusForbidden,
			description:    "Should reject newer wget versions - only exact I2P standard allowed",
		},
		{
			name:           "Much newer wget version",
			userAgent:      "Wget/1.20.3",
			expectedStatus: http.StatusForbidden,
			description:    "Should reject much newer wget versions - only exact I2P standard allowed",
		},
		{
			name:           "Invalid user agent (not wget)",
			userAgent:      "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
			expectedStatus: http.StatusForbidden,
			description:    "Should reject non-wget user agents",
		},
		{
			name:           "Invalid user agent (curl)",
			userAgent:      "curl/7.68.0",
			expectedStatus: http.StatusForbidden,
			description:    "Should reject curl and other non-wget agents",
		},
		{
			name:           "Malformed wget version",
			userAgent:      "Wget/invalid",
			expectedStatus: http.StatusForbidden,
			description:    "Should reject malformed wget versions",
		},
		{
			name:           "Empty user agent",
			userAgent:      "",
			expectedStatus: http.StatusForbidden,
			description:    "Should reject empty user agent",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			req := httptest.NewRequest("GET", "/i2pseeds.su3", nil)
			req.Header.Set("User-Agent", tc.userAgent)

			rr := httptest.NewRecorder()
			handler.ServeHTTP(rr, req)

			if rr.Code != tc.expectedStatus {
				t.Errorf("Expected status %d, got %d for user agent %q. %s",
					tc.expectedStatus, rr.Code, tc.userAgent, tc.description)
			}

			// Log the current behavior for visibility
			if tc.expectedStatus == http.StatusForbidden && rr.Code == http.StatusForbidden {
				t.Logf("BLOCKED (as expected): %s -> %d", tc.userAgent, rr.Code)
			} else if tc.expectedStatus == http.StatusOK && rr.Code == http.StatusOK {
				t.Logf("ALLOWED (as expected): %s -> %d", tc.userAgent, rr.Code)
			} else {
				t.Logf("MISMATCH: %s -> %d (expected %d)", tc.userAgent, rr.Code, tc.expectedStatus)
			}
		})
	}
}

// isValidI2PUserAgent validates if a user agent string is the exact I2P-required user agent
// According to I2P protocol specification, ONLY "Wget/1.11.4" is valid for SU3 bundle fetching
func isValidI2PUserAgent(userAgent string) bool {
	// I2P protocol requires exactly "Wget/1.11.4" - no other versions or variations allowed
	return userAgent == I2pUserAgent
}

// Test for the strict user agent validation (I2P protocol requirement)
func TestStrictUserAgentValidation(t *testing.T) {
	testCases := []struct {
		userAgent    string
		shouldAccept bool
		description  string
	}{
		{"Wget/1.11.4", true, "Only valid I2P user agent"},
		{"Wget/1.12.0", false, "Newer version not allowed"},
		{"Wget/1.20.3", false, "Much newer version not allowed"},
		{"Wget/2.0.0", false, "Major version upgrade not allowed"},
		{"wget/1.11.4", false, "Lowercase wget (case sensitive)"},
		{"Wget/1.11", false, "Missing patch version"},
		{"Wget/1.11.4.5", false, "Too many version parts"},
		{"Wget/1.11.4-ubuntu", false, "Version with suffix"},
		{"Wget/abc", false, "Non-numeric version"},
		{"Mozilla/5.0", false, "Browser user agent"},
		{"curl/7.68.0", false, "Curl user agent"},
		{"", false, "Empty user agent"},
		{"Wget", false, "No version"},
		{"Wget/", false, "Empty version"},
		{"Wget/1.11.3", false, "Older version not allowed"},
		{"Wget/1.10.4", false, "Older minor version not allowed"},
	}

	for _, tc := range testCases {
		t.Run(tc.userAgent, func(t *testing.T) {
			result := isValidI2PUserAgent(tc.userAgent)
			if result != tc.shouldAccept {
				t.Errorf("For user agent %q: expected %v, got %v. %s",
					tc.userAgent, tc.shouldAccept, result, tc.description)
			}
		})
	}
}
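The tests wrap their handler in verifyMiddleware and compare against the package's I2pUserAgent constant; the middleware itself is outside this diff. The following is a minimal sketch of a middleware with the behavior these tests assume (exact match on "Wget/1.11.4", 403 otherwise). The function name requireI2PUserAgent is illustrative; the real implementation in the reseed package may differ:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// I2pUserAgent is the only User-Agent accepted for SU3 fetches in this sketch.
const I2pUserAgent = "Wget/1.11.4"

// requireI2PUserAgent rejects every request whose User-Agent is not exactly
// I2pUserAgent, mirroring the behavior the tests above expect.
func requireI2PUserAgent(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.UserAgent() != I2pUserAgent {
			http.Error(w, "403 Forbidden", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	h := requireI2PUserAgent(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("OK"))
	}))

	req := httptest.NewRequest("GET", "/i2pseeds.su3", nil)
	req.Header.Set("User-Agent", "curl/7.68.0")
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	fmt.Println(rr.Code) // 403
}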