Recursive jobs support (#129)

This commit is contained in:
Joona Hoikkala 2019-12-31 14:19:27 +02:00 committed by GitHub
parent fef5f0c78f
commit b4adeae872
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
8 changed files with 107 additions and 4 deletions

View file

@ -4,6 +4,7 @@
- New - New
- New CLI flag `-od` (output directory) to enable writing requests and responses for matched results to a file for postprocessing or debugging purposes. - New CLI flag `-od` (output directory) to enable writing requests and responses for matched results to a file for postprocessing or debugging purposes.
- New CLI flag `-maxtime` to limit the running time of ffuf - New CLI flag `-maxtime` to limit the running time of ffuf
- New CLI flags `-recursion` and `-recursion-depth` to control recursive ffuf jobs if directories are found. This requires the `-u` to end with FUZZ keyword.
- Changed - Changed
- Limit the use of `-e` (extensions) to a single keyword: FUZZ - Limit the use of `-e` (extensions) to a single keyword: FUZZ
- Regexp matching and filtering (-mr/-fr) allow using keywords in patterns - Regexp matching and filtering (-mr/-fr) allow using keywords in patterns

10
main.go
View file

@ -99,6 +99,8 @@ func main() {
flag.BoolVar(&conf.StopOnErrors, "se", false, "Stop on spurious errors") flag.BoolVar(&conf.StopOnErrors, "se", false, "Stop on spurious errors")
flag.BoolVar(&conf.StopOnAll, "sa", false, "Stop on all error cases. Implies -sf and -se. Also stops on spurious 429 response codes.") flag.BoolVar(&conf.StopOnAll, "sa", false, "Stop on all error cases. Implies -sf and -se. Also stops on spurious 429 response codes.")
flag.BoolVar(&conf.FollowRedirects, "r", false, "Follow redirects") flag.BoolVar(&conf.FollowRedirects, "r", false, "Follow redirects")
flag.BoolVar(&conf.Recursion, "recursion", false, "Scan recursively. Only FUZZ keyword is supported, and URL (-u) has to end in it.")
flag.IntVar(&conf.RecursionDepth, "recursion-depth", 0, "Maximum recursion depth.")
flag.BoolVar(&conf.AutoCalibration, "ac", false, "Automatically calibrate filtering options") flag.BoolVar(&conf.AutoCalibration, "ac", false, "Automatically calibrate filtering options")
flag.Var(&opts.AutoCalibrationStrings, "acc", "Custom auto-calibration string. Can be used multiple times. Implies -ac") flag.Var(&opts.AutoCalibrationStrings, "acc", "Custom auto-calibration string. Can be used multiple times. Implies -ac")
flag.IntVar(&conf.Threads, "t", 40, "Number of concurrent threads.") flag.IntVar(&conf.Threads, "t", 40, "Number of concurrent threads.")
@ -371,6 +373,14 @@ func prepareConfig(parseOpts *cliOptions, conf *ffuf.Config) error {
} }
} }
// Do checks for recursion mode
if conf.Recursion {
if !strings.HasSuffix(conf.Url, "FUZZ") {
errmsg := fmt.Sprintf("When using -recursion the URL (-u) must end with FUZZ keyword.")
errs.Add(fmt.Errorf(errmsg))
}
}
return errs.ErrorOrNil() return errs.ErrorOrNil()
} }

View file

@ -49,6 +49,8 @@ type Config struct {
CommandLine string CommandLine string
Verbose bool Verbose bool
MaxTime int MaxTime int
Recursion bool
RecursionDepth int
} }
type InputProviderConfig struct { type InputProviderConfig struct {
@ -84,5 +86,7 @@ func NewConfig(ctx context.Context) Config {
conf.DirSearchCompat = false conf.DirSearchCompat = false
conf.Verbose = false conf.Verbose = false
conf.MaxTime = 0 conf.MaxTime = 0
conf.Recursion = false
conf.RecursionDepth = 0
return conf return conf
} }

View file

@ -17,6 +17,7 @@ type InputProvider interface {
AddProvider(InputProviderConfig) error AddProvider(InputProviderConfig) error
Next() bool Next() bool
Position() int Position() int
Reset()
Value() map[string][]byte Value() map[string][]byte
Total() int Total() int
} }
@ -37,6 +38,7 @@ type OutputProvider interface {
Banner() error Banner() error
Finalize() error Finalize() error
Progress(status Progress) Progress(status Progress)
Info(infostring string)
Error(errstring string) Error(errstring string)
Warning(warnstring string) Warning(warnstring string)
Result(resp Response) Result(resp Response)

View file

@ -27,6 +27,14 @@ type Job struct {
Count429 int Count429 int
Error string Error string
startTime time.Time startTime time.Time
queuejobs []QueueJob
queuepos int
currentDepth int
}
// QueueJob holds one pending (possibly recursive) fuzzing job: the target
// URL and the recursion depth at which it was discovered (0 for the
// initial, user-supplied job).
type QueueJob struct {
	Url   string
	depth int
}
func NewJob(conf *Config) Job { func NewJob(conf *Config) Job {
@ -35,6 +43,9 @@ func NewJob(conf *Config) Job {
j.ErrorCounter = 0 j.ErrorCounter = 0
j.SpuriousErrorCounter = 0 j.SpuriousErrorCounter = 0
j.Running = false j.Running = false
j.queuepos = 0
j.queuejobs = make([]QueueJob, 0)
j.currentDepth = 0
return j return j
} }
@ -69,17 +80,47 @@ func (j *Job) resetSpuriousErrors() {
//Start the execution of the Job //Start the execution of the Job
func (j *Job) Start() { func (j *Job) Start() {
// Add the default job to job queue
j.queuejobs = append(j.queuejobs, QueueJob{Url: j.Config.Url, depth: 0})
rand.Seed(time.Now().UnixNano()) rand.Seed(time.Now().UnixNano())
j.Total = j.Input.Total() j.Total = j.Input.Total()
defer j.Stop() defer j.Stop()
j.Running = true
j.startTime = time.Now()
//Show banner if not running in silent mode //Show banner if not running in silent mode
if !j.Config.Quiet { if !j.Config.Quiet {
j.Output.Banner() j.Output.Banner()
} }
j.Running = true
j.startTime = time.Now()
// Monitor for SIGTERM and do cleanup properly (writing the output files etc) // Monitor for SIGTERM and do cleanup properly (writing the output files etc)
j.interruptMonitor() j.interruptMonitor()
for j.jobsInQueue() {
j.prepareQueueJob()
if j.queuepos > 1 {
// Print info for queued recursive jobs
j.Output.Info(fmt.Sprintf("Scanning: %s", j.Config.Url))
}
j.Input.Reset()
j.Counter = 0
j.startExecution()
}
j.Output.Finalize()
}
// jobsInQueue reports whether there are still unprocessed jobs in the
// job queue (queuepos is the index of the next job to run).
func (j *Job) jobsInQueue() bool {
	// Return the comparison directly instead of an if/return-true/return-false chain.
	return j.queuepos < len(j.queuejobs)
}
// prepareQueueJob pops the next job from the queue into the active
// configuration: it points Config.Url and currentDepth at the queued
// entry and advances the queue position.
// NOTE(review): mutating the shared Config.Url assumes jobs run
// sequentially — confirm no concurrent readers of Config.
func (j *Job) prepareQueueJob() {
	j.Config.Url = j.queuejobs[j.queuepos].Url
	j.currentDepth = j.queuejobs[j.queuepos].depth
	// Idiomatic increment (gofmt/go style) instead of `+= 1`.
	j.queuepos++
}
func (j *Job) startExecution() {
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(1) wg.Add(1)
go j.runProgress(&wg) go j.runProgress(&wg)
@ -115,7 +156,6 @@ func (j *Job) Start() {
} }
wg.Wait() wg.Wait()
j.updateProgress() j.updateProgress()
j.Output.Finalize()
return return
} }
@ -150,6 +190,8 @@ func (j *Job) updateProgress() {
StartedAt: j.startTime, StartedAt: j.startTime,
ReqCount: j.Counter, ReqCount: j.Counter,
ReqTotal: j.Input.Total(), ReqTotal: j.Input.Total(),
QueuePos: j.queuepos,
QueueTotal: len(j.queuejobs),
ErrorCount: j.ErrorCounter, ErrorCount: j.ErrorCounter,
} }
j.Output.Progress(prog) j.Output.Progress(prog)
@ -223,9 +265,30 @@ func (j *Job) runTask(input map[string][]byte, position int, retried bool) {
// Refresh the progress indicator as we printed something out // Refresh the progress indicator as we printed something out
j.updateProgress() j.updateProgress()
} }
if j.Config.Recursion && len(resp.GetRedirectLocation()) > 0 {
j.handleRecursionJob(resp)
}
return return
} }
//handleRecursionJob queues a follow-up fuzzing job when a response looks
//like a newly discovered directory (a redirect to the same URL plus a
//trailing slash), respecting the configured maximum recursion depth.
func (j *Job) handleRecursionJob(resp Response) {
	// Only a redirect to "<request url>/" is treated as a directory hit.
	if resp.GetRedirectLocation() != resp.Request.Url+"/" {
		return
	}
	// A RecursionDepth of 0 means unlimited; otherwise stop once the
	// current depth has reached the configured limit.
	if j.Config.RecursionDepth != 0 && j.currentDepth >= j.Config.RecursionDepth {
		j.Output.Warning(fmt.Sprintf("Directory found, but recursion depth exceeded. Ignoring: %s", resp.GetRedirectLocation()))
		return
	}
	recUrl := resp.Request.Url + "/" + "FUZZ"
	j.queuejobs = append(j.queuejobs, QueueJob{Url: recUrl, depth: j.currentDepth + 1})
	j.Output.Info(fmt.Sprintf("Adding a new job to the queue: %s", recUrl))
}
//CalibrateResponses returns slice of Responses for randomly generated filter autocalibration requests //CalibrateResponses returns slice of Responses for randomly generated filter autocalibration requests
func (j *Job) CalibrateResponses() ([]Response, error) { func (j *Job) CalibrateResponses() ([]Response, error) {
cInputs := make([]string, 0) cInputs := make([]string, 0)

View file

@ -8,5 +8,7 @@ type Progress struct {
StartedAt time.Time StartedAt time.Time
ReqCount int ReqCount int
ReqTotal int ReqTotal int
QueuePos int
QueueTotal int
ErrorCount int ErrorCount int
} }

View file

@ -67,6 +67,15 @@ func (i *MainInputProvider) Value() map[string][]byte {
return retval return retval
} }
//Reset rewinds every registered input provider and zeroes the
//iteration counters so the whole input set can be replayed from the start.
func (i *MainInputProvider) Reset() {
	for idx := range i.Providers {
		i.Providers[idx].ResetPosition()
	}
	i.position = 0
	i.msbIterator = 0
}
//pitchforkValue returns a map of keyword:value pairs including all inputs. //pitchforkValue returns a map of keyword:value pairs including all inputs.
//This mode will iterate through wordlists in lockstep. //This mode will iterate through wordlists in lockstep.
func (i *MainInputProvider) pitchforkValue() map[string][]byte { func (i *MainInputProvider) pitchforkValue() map[string][]byte {

View file

@ -139,7 +139,19 @@ func (s *Stdoutput) Progress(status ffuf.Progress) {
dur -= mins * time.Minute dur -= mins * time.Minute
secs := dur / time.Second secs := dur / time.Second
fmt.Fprintf(os.Stderr, "%s:: Progress: [%d/%d] :: %d req/sec :: Duration: [%d:%02d:%02d] :: Errors: %d ::", TERMINAL_CLEAR_LINE, status.ReqCount, status.ReqTotal, reqRate, hours, mins, secs, status.ErrorCount) fmt.Fprintf(os.Stderr, "%s:: Progress: [%d/%d] :: Job [%d/%d] :: %d req/sec :: Duration: [%d:%02d:%02d] :: Errors: %d ::", TERMINAL_CLEAR_LINE, status.ReqCount, status.ReqTotal, status.QueuePos, status.QueueTotal, reqRate, hours, mins, secs, status.ErrorCount)
}
func (s *Stdoutput) Info(infostring string) {
if s.config.Quiet {
fmt.Fprintf(os.Stderr, "%s", infostring)
} else {
if !s.config.Colors {
fmt.Fprintf(os.Stderr, "%s[INFO] %s\n", TERMINAL_CLEAR_LINE, infostring)
} else {
fmt.Fprintf(os.Stderr, "%s[%sINFO%s] %s\n", TERMINAL_CLEAR_LINE, ANSI_BLUE, ANSI_CLEAR, infostring)
}
}
} }
func (s *Stdoutput) Error(errstring string) { func (s *Stdoutput) Error(errstring string) {