Mirror of https://github.com/sherlock-project/sherlock, synced 2024-11-15 08:47:08 +00:00
Fixed mix of tabs and spaces in a single file.
This commit is contained in:
parent 05353201b3
commit 8b6b14fc6a

1 changed file: site_list.py (36 additions, 36 deletions)

Since every changed line differs only in indentation (tabs replaced with four spaces), each hunk below is shown once, in its new space-indented form.
@@ -12,18 +12,18 @@ from argparse import ArgumentParser, RawDescriptionHelpFormatter

```python
pool = list()

def get_rank(domain_to_query, dest):
    result = -1
    url = "http://www.alexa.com/siteinfo/" + domain_to_query
    page = requests.get(url).text
    soup = bs(page, features="lxml")
    for span in soup.find_all('span'):
        if span.has_attr("class"):
            if "globleRank" in span["class"]:
                for strong in span.find_all("strong"):
                    if strong.has_attr("class"):
                        if "metrics-data" in strong["class"]:
                            result = int(strong.text.strip().replace(',', ''))
    dest['rank'] = result

parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter
                        )
```
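As context for reviewers, get_rank() above walks Alexa's siteinfo markup by CSS class. Below is a self-contained sketch of that same parsing pattern run against static HTML; the markup is a hypothetical stand-in for what the Alexa page served, so the live endpoint should not be assumed to still return it.

```python
from bs4 import BeautifulSoup as bs  # the script imports BeautifulSoup under this alias

# Hypothetical stand-in for the Alexa siteinfo markup get_rank() expects.
html = '<span class="globleRank"><strong class="metrics-data"> 1,234 </strong></span>'

soup = bs(html, features="lxml")
result = -1
for span in soup.find_all('span'):
    if span.has_attr("class") and "globleRank" in span["class"]:
        for strong in span.find_all("strong"):
            if strong.has_attr("class") and "metrics-data" in strong["class"]:
                # Drop surrounding whitespace and the thousands separator
                # before converting the rank to an integer.
                result = int(strong.text.strip().replace(',', ''))
print(result)  # prints 1234
```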
@@ -34,38 +34,38 @@ parser.add_argument("--rank","-r",

```python
args = parser.parse_args()

with open("data.json", "r", encoding="utf-8") as data_file:
    data = json.load(data_file)

with open("sites.md", "w") as site_file:
    data_length = len(data)
    site_file.write(f'## List Of Supported Sites ({data_length} Sites In Total!)\n')

    for social_network in data:
        url_main = data.get(social_network).get("urlMain")
        data.get(social_network)["rank"] = 0
        if args.rank:
            th = threading.Thread(target=get_rank, args=(url_main, data.get(social_network)))
        else:
            th = None
        pool.append((social_network, url_main, th))
        if args.rank:
            th.start()

    index = 1
    for social_network, url_main, th in pool:
        if args.rank:
            th.join()
        site_file.write(f'{index}. [{social_network}]({url_main})\n')
        sys.stdout.write("\r{0}".format(f"Updated {index} out of {data_length} entries"))
        sys.stdout.flush()
        index = index + 1

    if args.rank:
        site_file.write(f'\nAlexa.com rank data fetched at ({datetime.utcnow()} UTC)\n')

sorted_json_data = json.dumps(data, indent=2, sort_keys=True)

with open("data.json", "w") as data_file:
    data_file.write(sorted_json_data)

print("\nFinished updating supported site listing!")
```
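The --rank path above starts one thread per site and then joins them in insertion order while writing sites.md. Here is a minimal self-contained sketch of that start-all-then-join-in-order pattern, with a dummy worker standing in for get_rank(); all names are illustrative, not from the file.

```python
import threading

def fake_rank(name, dest):
    # Stand-in for get_rank(): write a result into the shared dict entry.
    dest["rank"] = len(name)

entries = {"SiteA": {}, "SiteB": {}, "SiteC": {}}
pool = []

# Fan out: start every lookup before waiting on any of them.
for name in entries:
    th = threading.Thread(target=fake_rank, args=(name, entries[name]))
    pool.append((name, th))
    th.start()

# Join in insertion order, mirroring how the script writes one
# sites.md row per entry once that entry's lookup has finished.
for name, th in pool:
    th.join()
    print(f"{name}: rank {entries[name]['rank']}")
```

Because each thread writes only to its own dict entry, no lock is needed; the join before each write is what guarantees the rank is populated by the time it is read.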
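For reference, the script assumes data.json maps each site name to an object carrying at least a urlMain field, and its final step rewrites the file sorted and pretty-printed. A hypothetical two-entry illustration of that shape and round trip (the site names and URLs are examples, not taken from the real data file):

```python
import json

# Hypothetical data.json contents in the shape the script reads:
# each key is a site name, each value holds at least "urlMain".
data = {
    "GitHub": {"urlMain": "https://www.github.com/"},
    "Reddit": {"urlMain": "https://www.reddit.com/"},
}

# Mirrors the script's final step: sorted keys and fixed indentation
# keep the rewritten file stable and easy to diff.
print(json.dumps(data, indent=2, sort_keys=True))
```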