#! /usr/bin/env python3
"""
Sherlock: Find Usernames Across Social Networks Module

This module contains the main logic to search for usernames at social
networks.
"""
import csv
import json
import os
import platform
import re
import sys
import random

from argparse import ArgumentParser, RawDescriptionHelpFormatter
from concurrent.futures import ThreadPoolExecutor
from time import time

import requests
from colorama import Fore, Style, init
from requests_futures.sessions import FuturesSession
from torrequest import TorRequest

from load_proxies import load_proxies_from_csv, check_proxy_list

module_name = "Sherlock: Find Usernames Across Social Networks"
__version__ = "0.10.3"
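
# Typical command-line usage (illustrative; assumes this module is saved as
# sherlock.py and run directly as a script):
#
#   python3 sherlock.py johndoe --csv --timeout 10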

global proxy_list
proxy_list = []


class ElapsedFuturesSession(FuturesSession):
    """
    Extends FutureSession to add a response time metric to each request.

    This is taken (almost) directly from here: https://github.com/ross/requests-futures#working-in-the-background
    """

    def request(self, method, url, hooks=None, *args, **kwargs):
        start = time()
        # Use a fresh dict rather than a mutable default argument, so that
        # timing hooks from earlier requests cannot accumulate across calls.
        if hooks is None:
            hooks = {}

        def timing(r, *args, **kwargs):
            elapsed_sec = time() - start
            r.elapsed = round(elapsed_sec * 1000)

        try:
            if isinstance(hooks['response'], (list, tuple)):
                # needs to be first so we don't time other hooks' execution
                hooks['response'].insert(0, timing)
            else:
                hooks['response'] = [timing, hooks['response']]
        except KeyError:
            hooks['response'] = timing

        return super(ElapsedFuturesSession, self).request(method, url, hooks=hooks, *args, **kwargs)
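

# Terminal output helpers.  With color disabled the same information is
# printed as plain text, e.g. "[*] Checking username <user> on:",
# "[+] <site>: <url>" and "[-] <site>: Not Found!".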
def print_info(title, info, color=True):
    if color:
        print(Style.BRIGHT + Fore.GREEN + "[" +
              Fore.YELLOW + "*" +
              Fore.GREEN + f"] {title}" +
              Fore.WHITE + f" {info}" +
              Fore.GREEN + " on:")
    else:
        print(f"[*] {title} {info} on:")


def print_error(err, errstr, var, verbose=False, color=True):
    if color:
        print(Style.BRIGHT + Fore.WHITE + "[" +
              Fore.RED + "-" +
              Fore.WHITE + "]" +
              Fore.RED + f" {errstr}" +
              Fore.YELLOW + f" {err if verbose else var}")
    else:
        print(f"[-] {errstr} {err if verbose else var}")


def format_response_time(response_time, verbose):
    return " [{} ms]".format(response_time) if verbose else ""


def print_found(social_network, url, response_time, verbose=False, color=True):
    if color:
        print((Style.BRIGHT + Fore.WHITE + "[" +
               Fore.GREEN + "+" +
               Fore.WHITE + "]" +
               format_response_time(response_time, verbose) +
               Fore.GREEN + f" {social_network}:"), url)
    else:
        print(f"[+]{format_response_time(response_time, verbose)} {social_network}: {url}")


def print_not_found(social_network, response_time, verbose=False, color=True):
    if color:
        print((Style.BRIGHT + Fore.WHITE + "[" +
               Fore.RED + "-" +
               Fore.WHITE + "]" +
               format_response_time(response_time, verbose) +
               Fore.GREEN + f" {social_network}:" +
               Fore.YELLOW + " Not Found!"))
    else:
        print(f"[-]{format_response_time(response_time, verbose)} {social_network}: Not Found!")


def print_invalid(social_network, msg, color=True):
    """Print invalid search result."""
    if color:
        print((Style.BRIGHT + Fore.WHITE + "[" +
               Fore.RED + "-" +
               Fore.WHITE + "]" +
               Fore.GREEN + f" {social_network}:" +
               Fore.YELLOW + f" {msg}"))
    else:
        print(f"[-] {social_network} {msg}")


def get_response(request_future, error_type, social_network, verbose=False, retry_no=None, color=True):
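    """Resolve a request future and report any errors.

    Returns a tuple of (response, error type, response time in ms), or
    (None, "", -1) if the request failed.
    """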
    global proxy_list

    try:
        rsp = request_future.result()
        if rsp.status_code:
            return rsp, error_type, rsp.elapsed
    except requests.exceptions.HTTPError as errh:
        print_error(errh, "HTTP Error:", social_network, verbose, color)

    # In case our proxy fails, we retry with another proxy.
    except requests.exceptions.ProxyError as errp:
        if retry_no > 0 and len(proxy_list) > 0:
            # Select a new proxy at random and retry.
            new_proxy = random.choice(proxy_list)
            new_proxy = f'{new_proxy.protocol}://{new_proxy.ip}:{new_proxy.port}'
            print(f'Retrying with {new_proxy}')
            request_future.proxy = {'http': new_proxy, 'https': new_proxy}
            return get_response(request_future, error_type, social_network,
                                verbose, retry_no=retry_no - 1, color=color)
        else:
            print_error(errp, "Proxy error:", social_network, verbose, color)
    except requests.exceptions.ConnectionError as errc:
        print_error(errc, "Error Connecting:", social_network, verbose, color)
    except requests.exceptions.Timeout as errt:
        print_error(errt, "Timeout Error:", social_network, verbose, color)
    except requests.exceptions.RequestException as err:
        print_error(err, "Unknown error:", social_network, verbose, color)
    return None, "", -1


def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False,
             proxy=None, print_found_only=False, timeout=None, color=True):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username          -- String indicating username that report
                         should be created against.
    site_data         -- Dictionary containing all of the site data.
    verbose           -- Boolean indicating whether to give verbose output.
    tor               -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor        -- Boolean indicating whether to use a new tor circuit for each request.
    proxy             -- String indicating the proxy URL.
    print_found_only  -- Boolean indicating whether to only print sites where the username was found.
    timeout           -- Time in seconds to wait before timing out request.
                         Default is no timeout.
    color             -- Boolean indicating whether to color terminal output.

    Return Value:
    Dictionary containing results from report. Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        exists:        String indicating results of test for account existence.
        http_status:   HTTP status code of query which checked for existence on site.
        response_text: Text that came back from request. May be None if
                       there was an HTTP error when checking for existence.
    """
    print_info("Checking username", username, color)

    # Create session based on request methodology
    if tor or unique_tor:
        # Requests using Tor obfuscation
        underlying_request = TorRequest()
        underlying_session = underlying_request.session
    else:
        # Normal requests
        underlying_session = requests.session()
        underlying_request = requests.Request()

    # Limit number of workers to 20.
    # This is probably vastly overkill.
    if len(site_data) >= 20:
        max_workers = 20
    else:
        max_workers = len(site_data)

    # Create multi-threaded session for all requests.
    session = ElapsedFuturesSession(max_workers=max_workers,
                                    session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # A user agent is needed because some sites don't return the correct
        # information since they think that we are bots (Which we actually are...)
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',
        }

        if "headers" in net_info:
            # Override/append any extra headers required by a given site.
            headers.update(net_info["headers"])

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            if not print_found_only:
                print_invalid(social_network, "Illegal Username Format For This Site!", color)

            results_site["exists"] = "illegal"
            results_site["url_user"] = ""
            results_site['http_status'] = ""
            results_site['response_text'] = ""
            results_site['response_time_ms'] = ""
        else:
            # URL of user on site (if it exists)
            url = net_info["url"].format(username)
            results_site["url_user"] = url
            url_probe = net_info.get("urlProbe")
            if url_probe is None:
                # Probe URL is normal one seen by people out on the web.
                url_probe = url
            else:
                # There is a special URL for probing existence separate
                # from where the user profile normally can be found.
                url_probe = url_probe.format(username)

            # If only the status_code is needed don't download the body
            if net_info["errorType"] == 'status_code':
                request_method = session.head
            else:
                request_method = session.get

            if net_info["errorType"] == "response_url":
                # Site forwards request to a different URL if username not
                # found. Disallow the redirect so we can capture the
                # http status from the original URL request.
                allow_redirects = False
            else:
                # Allow whatever redirect that the site wants to do.
                # The final result of the request will be what is available.
                allow_redirects = True

            # This future starts running the request in a new thread, doesn't block the main thread
            if proxy != None:
                proxies = {"http": proxy, "https": proxy}
                future = request_method(url=url_probe, headers=headers,
                                        proxies=proxies,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout
                                        )
            else:
                future = request_method(url=url_probe, headers=headers,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout
                                        )

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identity for tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)

        # Retrieve other site information again
        url = results_site.get("url_user")
        exists = results_site.get("exists")
        if exists is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Default data in case there are any failures in doing a request.
        http_status = "?"
        response_text = ""

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_type, response_time = get_response(request_future=future,
                                                    error_type=error_type,
                                                    social_network=social_network,
                                                    verbose=verbose,
                                                    retry_no=3,
                                                    color=color)

        # Attempt to get request information
        try:
            http_status = r.status_code
        except:
            pass
        try:
            response_text = r.text.encode(r.encoding)
        except:
            pass
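
        # Determine whether the account exists, using the detection method
        # configured for this site: an error message in the page body
        # ("message"), the HTTP status code ("status_code"), or a redirect
        # away from the profile URL ("response_url").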
        if error_type == "message":
            error = net_info.get("errorMsg")
            # Checks if the error message is in the HTML
            if error not in r.text:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose, color)
                exists = "no"

        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX
            if 200 <= r.status_code < 300:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose, color)
                exists = "no"

        elif error_type == "response_url":
            # For this detection method, we have turned off the redirect.
            # So, there is no need to check the response URL: it will always
            # match the request. Instead, we will ensure that the response
            # code indicates that the request was successful (i.e. no 404, or
            # forward to some odd redirect).
            if 200 <= r.status_code < 300:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose, color)
                exists = "no"

        elif error_type == "":
            if not print_found_only:
                print_invalid(social_network, "Error!", color)
            exists = "error"

        # Save exists flag
        results_site['exists'] = exists

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text
        results_site['response_time_ms'] = response_time

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    return results_total


def timeout_check(value):
    """Check Timeout Argument.

    Checks timeout for validity.

    Keyword Arguments:
    value  -- Time in seconds to wait before timing out request.

    Return Value:
    Floating point number representing the time (in seconds) that should be
    used for the timeout.

    NOTE: Will raise an exception if the timeout is invalid.
    """
    from argparse import ArgumentTypeError

    try:
        timeout = float(value)
    except ValueError:
        raise ArgumentTypeError(f"Timeout '{value}' must be a number.")
    if timeout <= 0:
        raise ArgumentTypeError(f"Timeout '{value}' must be greater than 0.0s.")
    return timeout


def main():
    # Colorama module's initialization.
    init(autoreset=True)

    version_string = f"%(prog)s {__version__}\n" + \
                     f"{requests.__description__}: {requests.__version__}\n" + \
                     f"Python: {platform.python_version()}"

    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=f"{module_name} (Version {__version__})"
                            )
    parser.add_argument("--version",
                        action="version", version=version_string,
                        help="Display version information and dependencies."
                        )
    parser.add_argument("--verbose", "-v", "-d", "--debug",
                        action="store_true", dest="verbose", default=False,
                        help="Display extra debugging information and metrics."
                        )
    parser.add_argument("--rank", "-r",
                        action="store_true", dest="rank", default=False,
                        help="Present websites ordered by their Alexa.com global rank in popularity.")
    parser.add_argument("--folderoutput", "-fo", dest="folderoutput",
                        help="If using multiple usernames, the output of the results will be saved to this folder."
                        )
    parser.add_argument("--output", "-o", dest="output",
                        help="If using single username, the output of the result will be saved to this file."
                        )
    parser.add_argument("--tor", "-t",
                        action="store_true", dest="tor", default=False,
                        help="Make requests over Tor; increases runtime; requires Tor to be installed and in system path.")
    parser.add_argument("--unique-tor", "-u",
                        action="store_true", dest="unique_tor", default=False,
                        help="Make requests over Tor with new Tor circuit after each request; increases runtime; requires Tor to be installed and in system path.")
    parser.add_argument("--csv",
                        action="store_true", dest="csv", default=False,
                        help="Create Comma-Separated Values (CSV) File."
                        )
    parser.add_argument("--site",
                        action="append", metavar='SITE_NAME',
                        dest="site_list", default=None,
                        help="Limit analysis to just the listed sites. Add multiple options to specify more than one site."
                        )
    parser.add_argument("--proxy", "-p", metavar='PROXY_URL',
                        action="store", dest="proxy", default=None,
                        help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080"
                        )
    parser.add_argument("--json", "-j", metavar="JSON_FILE",
                        dest="json_file", default="data.json",
                        help="Load data from a JSON file or an online, valid, JSON file.")
    parser.add_argument("--proxy_list", "-pl", metavar='PROXY_LIST',
                        action="store", dest="proxy_list", default=None,
                        help="Make requests over a proxy randomly chosen from a list generated from a .csv file."
                        )
    parser.add_argument("--check_proxies", "-cp", metavar='CHECK_PROXY',
                        action="store", dest="check_prox", default=None,
                        help="To be used with the '--proxy_list' parameter. "
                             "The script will check if the proxies supplied in the .csv file are working and anonymous. "
                             "Put 0 for no limit on successfully checked proxies, or another number to institute a limit."
                        )
    parser.add_argument("--timeout",
                        action="store", metavar='TIMEOUT',
                        dest="timeout", type=timeout_check, default=None,
                        help="Time (in seconds) to wait for response to requests. "
                             "Default is no timeout. "
                             "A longer timeout will be more likely to get results from slow sites. "
                             "On the other hand, this may cause a long delay to gather all results."
                        )
    parser.add_argument("--print-found",
                        action="store_true", dest="print_found_only", default=False,
                        help="Do not output sites where the username was not found."
                        )
    parser.add_argument("--no-color",
                        action="store_true", dest="no_color", default=False,
                        help="Don't color terminal output."
                        )
    parser.add_argument("username",
                        nargs='+', metavar='USERNAMES',
                        action="store",
                        help="One or more usernames to check with social networks."
                        )

    args = parser.parse_args()

    # Argument check
    # TODO regex check on args.proxy
    if args.tor and (args.proxy != None or args.proxy_list != None):
        raise Exception("Tor and Proxy cannot be set at the same time.")

    # Proxy argument check.
    # Does not necessarily need to throw an error,
    # since we could join the single proxy with the ones generated from the .csv,
    # but it seems unnecessarily complex at this time.
    if args.proxy != None and args.proxy_list != None:
        raise Exception("A single proxy cannot be used along with proxy list.")

    # Make prompts
    if args.proxy != None:
        print("Using the proxy: " + args.proxy)

    global proxy_list

    if args.proxy_list != None:
        print_info("Loading proxies from", args.proxy_list, not args.no_color)

        proxy_list = load_proxies_from_csv(args.proxy_list)

    # Checking if proxies should be checked for anonymity.
    if args.check_prox != None and args.proxy_list != None:
        try:
            limit = int(args.check_prox)
            if limit == 0:
                proxy_list = check_proxy_list(proxy_list)
            elif limit > 0:
                proxy_list = check_proxy_list(proxy_list, limit)
            else:
                raise ValueError
        except ValueError:
            raise Exception("Parameter --check_proxies/-cp must be a positive integer.")

    if args.tor or args.unique_tor:
        print("Using Tor to make requests")
        print("Warning: some websites might refuse connecting over Tor, so note that using this option might increase connection errors.")

    # Check if both output methods are entered as input.
    if args.output is not None and args.folderoutput is not None:
        print("You can only use one of the output methods.")
        sys.exit(1)

    # Check validity for single username output.
    if args.output is not None and len(args.username) != 1:
        print("You can only use --output with a single username")
        sys.exit(1)

    response_json_online = None
    site_data_all = None

    # Try to load json from website.
    try:
        response_json_online = requests.get(url=args.json_file)
    except requests.exceptions.MissingSchema:  # In case the schema is wrong it's because it may not be a website
        pass

    # Check if the response is appropriate.
    if response_json_online is not None and response_json_online.status_code == 200:
        # Since we got data from a website, try to load json and exit if parsing fails.
        try:
            site_data_all = response_json_online.json()
        except ValueError:
            print("Invalid JSON from website!")
            sys.exit(1)

    data_file_path = os.path.join(os.path.dirname(
        os.path.realpath(__file__)), args.json_file)
    # This will be none if the request had a missing schema
    if site_data_all is None:
        # Check if the file exists otherwise exit.
        if not os.path.exists(data_file_path):
            print("JSON file doesn't exist.")
            print(
                "If this is not a file but a website, make sure you have appended http:// or https://.")
            sys.exit(1)
        else:
            try:
                with open(data_file_path, "r", encoding="utf-8") as raw:
                    site_data_all = json.load(raw)
            except ValueError:
                print("Invalid JSON loaded from file.")
                sys.exit(1)

    if args.site_list is None:
        # Not desired to look at a sub-set of sites
        site_data = site_data_all
    else:
        # User desires to selectively run queries on a sub-set of the site list.

        # Make sure that the sites are supported & build up pruned site database.
        site_data = {}
        site_missing = []
        for site in args.site_list:
            for existing_site in site_data_all:
                if site.lower() == existing_site.lower():
                    site_data[existing_site] = site_data_all[existing_site]
            if not site_data:
                # Build up list of sites not supported for future error message.
                site_missing.append(f"'{site}'")

        if site_missing:
            print(
                f"Error: Desired sites not found: {', '.join(site_missing)}.")
            sys.exit(1)

    if args.rank:
        # Sort data by rank
        site_dataCpy = dict(site_data)
        ranked_sites = sorted(site_data, key=lambda k: ("rank" not in k, site_data[k].get("rank", sys.maxsize)))
        site_data = {}
        for site in ranked_sites:
            site_data[site] = site_dataCpy.get(site)

    # Run report on all specified users.
    for username in args.username:
        print()

        if args.output:
            file = open(args.output, "w", encoding="utf-8")
        elif args.folderoutput:  # In case we handle multiple usernames at a targeted folder.
            # If the folder doesn't exist, create it first
            if not os.path.isdir(args.folderoutput):
                os.mkdir(args.folderoutput)
            file = open(os.path.join(args.folderoutput,
                                     username + ".txt"), "w", encoding="utf-8")
        else:
            file = open(username + ".txt", "w", encoding="utf-8")

        # We try to add a random member of the 'proxy_list' var as the proxy of the request.
        # If we can't access the list or it is empty, we proceed with args.proxy as the proxy.
        try:
            random_proxy = random.choice(proxy_list)
            proxy = f'{random_proxy.protocol}://{random_proxy.ip}:{random_proxy.port}'
        except (NameError, IndexError):
            proxy = args.proxy

        results = sherlock(username,
                           site_data,
                           verbose=args.verbose,
                           tor=args.tor,
                           unique_tor=args.unique_tor,
                           proxy=proxy,
                           print_found_only=args.print_found_only,
                           timeout=args.timeout,
                           color=not args.no_color)

        exists_counter = 0
        for website_name in results:
            dictionary = results[website_name]
            if dictionary.get("exists") == "yes":
                exists_counter += 1
                file.write(dictionary["url_user"] + "\n")
        file.write(f"Total Websites Username Detected On : {exists_counter}")
        file.close()

        if args.csv:
            with open(username + ".csv", "w", newline='', encoding="utf-8") as csv_report:
                writer = csv.writer(csv_report)
                writer.writerow(['username',
                                 'name',
                                 'url_main',
                                 'url_user',
                                 'exists',
                                 'http_status',
                                 'response_time_ms'
                                 ]
                                )
                for site in results:
                    writer.writerow([username,
                                     site,
                                     results[site]['url_main'],
                                     results[site]['url_user'],
                                     results[site]['exists'],
                                     results[site]['http_status'],
                                     results[site]['response_time_ms']
                                     ]
                                    )


if __name__ == "__main__":
    main()