package main

import (
	"bufio"
	"fmt"
	"net/url"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"time"
)

const (
	userAgent = "Mozilla/5.0 (compatible; meg/0.2; +https://github.com/tomnomnom/meg)"

	// argument defaults
	defaultPathsFile = "./paths"
	defaultHostsFile = "./hosts"
	defaultOutputDir = "./out"
)

// a requester is a function that makes HTTP requests
type requester func(request) response

func main() {
	// get the config struct
	c := processArgs()

	// read the paths file
	paths, err := readLinesOrLiteral(c.paths, defaultPathsFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to open paths file: %s\n", err)
		os.Exit(1)
	}

	// read the hosts file
	hosts, err := readLinesOrLiteral(c.hosts, defaultHostsFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to open hosts file: %s\n", err)
		os.Exit(1)
	}

	// make the output directory
	err = os.MkdirAll(c.output, 0750)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create output directory: %s\n", err)
		os.Exit(1)
	}

	// open the index file
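	// (each saved response is recorded in it as "<saved path> <url> (<status>)")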
	indexFile := filepath.Join(c.output, "index")
	index, err := os.OpenFile(indexFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to open index file for writing: %s\n", err)
		os.Exit(1)
	}

	// set up a rate limiter
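	// (c.delay is presumably given in milliseconds; multiplying by 1,000,000
	// turns it into nanoseconds, the unit time.Duration counts in. The same
	// conversion is applied to c.timeout further down.)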
	rl := newRateLimiter(time.Duration(c.delay * 1000000))

	// the request and response channels for
	// the worker pool
	requests := make(chan request)
	responses := make(chan response)

	// spin up some workers to do the requests
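	// (each worker pulls from the shared requests channel and, presumably,
	// blocks on the per-host rate limiter before firing, so at most
	// c.concurrency requests are in flight at any one time)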
	var wg sync.WaitGroup
	for i := 0; i < c.concurrency; i++ {
		wg.Add(1)
		go func() {
			for req := range requests {
				rl.Block(req.Hostname())
				responses <- c.requester(req)
			}
			wg.Done()
		}()
	}

	// start outputting the response lines; we need a second
	// WaitGroup so we know the outputting has finished
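	// (without it, main could return, and the process exit, before the final
	// responses had been written to disk and to the index file)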
	var owg sync.WaitGroup
	owg.Add(1)
	go func() {
		for res := range responses {
			if len(c.saveStatus) > 0 && !c.saveStatus.Includes(res.statusCode) {
				continue
			}

			if len(c.saveResp) > 0 && !(strings.Contains(strings.Join(res.headers, ""), c.saveResp) || (strings.Contains(string(res.body), c.saveResp))) {
				continue
			}

			if len(c.discResp) > 0 && (strings.Contains(strings.Join(res.headers, ""), c.discResp) || (strings.Contains(string(res.body), c.discResp))) {
				continue
			}

			if len(c.regexIgnore) > 0 {
				re := regexp.MustCompile(c.regexIgnore)
				matched := re.MatchString(res.String())
				if matched {
					continue
				}
			}

			if len(c.regexKeep) > 0 {
				re := regexp.MustCompile(c.regexKeep)
				matched := re.MatchString(res.String())
				if !matched {
					continue
				}
			}

			if res.err != nil {
				fmt.Fprintf(os.Stderr, "request failed: %s\n", res.err)
				continue
			}

			path, err := res.save(c.output, c.noHeaders)
			if err != nil {
				fmt.Fprintf(os.Stderr, "failed to save file: %s\n", err)
			}

			line := fmt.Sprintf("%s %s (%s)\n", path, res.request.URL(), res.status)
			fmt.Fprintf(index, "%s", line)

			if c.verbose {
				fmt.Printf("%s", line)
			}
		}
		owg.Done()
	}()

	// send requests for each path for every host
	for _, path := range paths {
		for _, host := range hosts {

			// the host portion may contain a path prefix,
			// so we should strip that off and add it to
			// the beginning of the path.
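			// (e.g. a host entry of https://example.com/app combined with the
			// path /admin is requested as https://example.com/app/admin)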
			u, err := url.Parse(host)
			if err != nil {
				fmt.Fprintf(os.Stderr, "failed to parse host: %s\n", err)
				continue
			}
			prefixedPath := u.Path + path
			u.Path = ""

			// stripping off a path means we need to
			// rebuild the host portion too
			host = u.String()

			requests <- request{
				method:         c.method,
				host:           host,
				path:           prefixedPath,
				headers:        c.headers,
				followLocation: c.followLocation,
				body:           c.body,
				timeout:        time.Duration(c.timeout * 1000000),
			}
		}
	}

	// once all of the requests have been sent we can
	// close the requests channel
	close(requests)

	// wait for all the workers to finish before closing
	// the responses channel
	wg.Wait()
	close(responses)

	owg.Wait()
}

// readLines reads all of the lines from a text file into
// a slice of strings, returning the slice and any error
func readLines(filename string) ([]string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return []string{}, err
	}
	defer f.Close()

	lines := make([]string, 0)
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		lines = append(lines, sc.Text())
	}
	return lines, sc.Err()
}

// readLinesOrLiteral tries to read lines from a file, returning
// the arg in a string slice if the file doesn't exist, unless
// the arg matches its default value
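// (e.g. an arg of "./paths" that doesn't exist on disk is an error, because it
// is the default; an arg of "/robots.txt" that isn't a file is returned as the
// single literal value to use)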
func readLinesOrLiteral(arg, argDefault string) ([]string, error) {
	if isFile(arg) {
		return readLines(arg)
	}

	// if the argument isn't a file, but it is the default, don't
	// treat it as a literal value
	if arg == argDefault {
		return []string{}, fmt.Errorf("file %s not found", arg)
	}

	return []string{arg}, nil
}

// isFile returns true if its argument is a regular file
func isFile(path string) bool {
	f, err := os.Stat(path)
	return err == nil && f.Mode().IsRegular()
}