import math, re, time
from modules import util
from modules.util import Failed
from urllib.parse import urlparse, parse_qs
logger = util.logger
builders = ["imdb_list", "imdb_id", "imdb_chart"]
movie_charts = ["box_office", "popular_movies", "top_movies", "top_english", "top_indian", "lowest_rated"]
show_charts = ["popular_shows", "top_shows"]
charts = {
    "box_office": "Box Office",
    "popular_movies": "Most Popular Movies",
    "popular_shows": "Most Popular TV Shows",
    "top_movies": "Top 250 Movies",
    "top_shows": "Top 250 TV Shows",
    "top_english": "Top Rated English Movies",
    "top_indian": "Top Rated Indian Movies",
    "lowest_rated": "Lowest Rated Movies"
}
base_url = "https://www.imdb.com"
urls = {
    "lists": f"{base_url}/list/ls",
    "searches": f"{base_url}/search/title/",
    "keyword_searches": f"{base_url}/search/keyword/",
    "filmography_searches": f"{base_url}/filmosearch/"
}
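
# Roughly the kind of imdb_list input validate_imdb_lists() below accepts (the list ID
# and limit here are hypothetical examples, not values taken from this project):
#   imdb_list:
#     - url: https://www.imdb.com/list/ls000000000/
#       limit: 100
# A bare URL string is also accepted and is treated as {"url": <string>} with limit 0.
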
class IMDb:
    def __init__(self, config):
        self.config = config

    def validate_imdb_lists(self, imdb_lists, language):
        valid_lists = []
        for imdb_dict in util.get_list(imdb_lists, split=False):
            if not isinstance(imdb_dict, dict):
                imdb_dict = {"url": imdb_dict}
            dict_methods = {dm.lower(): dm for dm in imdb_dict}  # lowercased attribute names -> original keys
            if "url" not in dict_methods:
                raise Failed("Collection Error: imdb_list url attribute not found")
            elif imdb_dict[dict_methods["url"]] is None:
                raise Failed("Collection Error: imdb_list url attribute is blank")
            else:
                imdb_url = imdb_dict[dict_methods["url"]].strip()
            if not imdb_url.startswith(tuple([v for k, v in urls.items()])):
                fails = "\n".join([f"{v} (For {k.replace('_', ' ').title()})" for k, v in urls.items()])
                raise Failed(f"IMDb Error: {imdb_url} must begin with either:\n{fails}")
            self._total(imdb_url, language)  # raises Failed if the URL cannot be parsed
            list_count = None
            if "limit" in dict_methods:
                if imdb_dict[dict_methods["limit"]] is None:
                    logger.warning("Collection Warning: imdb_list limit attribute is blank, using 0 as default")
                else:
                    try:
                        value = int(str(imdb_dict[dict_methods["limit"]]))
                        if value >= 0:
                            list_count = value
                    except ValueError:
                        pass
                    if list_count is None:
                        logger.warning("Collection Warning: imdb_list limit attribute must be an integer 0 or greater, using 0 as default")
            if list_count is None:
                list_count = 0
            valid_lists.append({"url": imdb_url, "limit": list_count})
        return valid_lists
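
    # Sketch of the normalized output: a single entry such as
    # "https://www.imdb.com/list/ls000000000/" (hypothetical list ID) comes back from
    # validate_imdb_lists() as [{"url": "https://www.imdb.com/list/ls000000000/", "limit": 0}]
    # once the prefix check and the _total() lookup both succeed.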

    def _total(self, imdb_url, language):
        if imdb_url.startswith(urls["lists"]):
            xpath_total = "//div[@class='desc lister-total-num-results']/text()"
            per_page = 100
        elif imdb_url.startswith(urls["searches"]):
            xpath_total = "//div[@class='desc']/span/text()"
            per_page = 250
        else:
            xpath_total = "//div[@class='desc']/text()"
            per_page = 50
        results = self.config.get_html(imdb_url, headers=util.header(language)).xpath(xpath_total)
        total = 0
        for result in results:
            if "title" in result:
                try:
                    total = int(re.findall(r"(\d+) title", result.replace(",", ""))[0])
                    break
                except IndexError:
                    pass
        if total > 0:
            return total, per_page
        raise Failed(f"IMDb Error: Failed to parse URL: {imdb_url}")

    def _ids_from_url(self, imdb_url, language, limit):
        total, item_count = self._total(imdb_url, language)
        headers = util.header(language)
        imdb_ids = []
        parsed_url = urlparse(imdb_url)
        params = parse_qs(parsed_url.query)
        imdb_base = parsed_url._replace(query=None).geturl()
        params.pop("start", None)  # noqa
        params.pop("count", None)  # noqa
        params.pop("page", None)  # noqa
        if self.config.trace_mode:
            logger.debug(f"URL: {imdb_base}")
            logger.debug(f"Params: {params}")
        search_url = imdb_base.startswith(urls["searches"])
        if limit < 1 or total < limit:
            limit = total
        remainder = limit % item_count
        if remainder == 0:
            remainder = item_count
        num_of_pages = math.ceil(int(limit) / item_count)
        for i in range(1, num_of_pages + 1):
            start_num = (i - 1) * item_count + 1
            logger.ghost(f"Parsing Page {i}/{num_of_pages} {start_num}-{limit if i == num_of_pages else i * item_count}")
            if search_url:
                params["count"] = remainder if i == num_of_pages else item_count  # noqa
                params["start"] = start_num  # noqa
            else:
                params["page"] = i  # noqa
            response = self.config.get_html(imdb_base, headers=headers, params=params)
            ids_found = response.xpath("//div[contains(@class, 'lister-item-image')]//a/img//@data-tconst")
            if not search_url and i == num_of_pages:
                ids_found = ids_found[:remainder]
            imdb_ids.extend(ids_found)
            time.sleep(2)
        logger.exorcise()
        if len(imdb_ids) > 0:
            logger.debug(f"{len(imdb_ids)} IMDb IDs Found: {imdb_ids}")
            return imdb_ids
        raise Failed(f"IMDb Error: No IMDb IDs Found at {imdb_url}")

    def parental_guide(self, imdb_id, ignore_cache=False):
        parental_dict = {}
        expired = None
        if self.config.Cache and not ignore_cache:
            parental_dict, expired = self.config.Cache.query_imdb_parental(imdb_id, self.config.Cache.expiration)
            if parental_dict and expired is False:
                return parental_dict
        response = self.config.get_html(f"{base_url}/title/{imdb_id}/parentalguide")
        for ptype in ["nudity", "violence", "profanity", "alcohol", "frightening"]:
            results = response.xpath(f"//section[@id='advisory-{ptype}']//span[contains(@class,'ipl-status-pill')]/text()")
            if results:
                parental_dict[ptype] = results[0].strip()
            else:
                raise Failed(f"IMDb Error: No Item Found for IMDb ID: {imdb_id}")
        if self.config.Cache and not ignore_cache:
            self.config.Cache.update_imdb_parental(expired, imdb_id, parental_dict, self.config.Cache.expiration)
        return parental_dict
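
    # parental_guide() returns one severity label per advisory section, e.g. (values are
    # illustrative; each is whatever text IMDb's status pill carries):
    #   {"nudity": "Mild", "violence": "Moderate", "profanity": "Mild",
    #    "alcohol": "Mild", "frightening": "Moderate"}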

    def _ids_from_chart(self, chart):
        if chart == "box_office":
            url = "chart/boxoffice"
        elif chart == "popular_movies":
            url = "chart/moviemeter"
        elif chart == "popular_shows":
            url = "chart/tvmeter"
        elif chart == "top_movies":
            url = "chart/top"
        elif chart == "top_shows":
            url = "chart/toptv"
        elif chart == "top_english":
            url = "chart/top-english-movies"
        elif chart == "top_indian":
            url = "india/top-rated-indian-movies"
        elif chart == "lowest_rated":
            url = "chart/bottom"
        else:
            raise Failed(f"IMDb Error: chart: {chart} not supported")
        return self.config.get_html(f"{base_url}/{url}").xpath("//div[@class='wlb_ribbon']/@data-tconst")
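
    # Illustrative call: _ids_from_chart("lowest_rated") fetches
    # https://www.imdb.com/chart/bottom and returns the tt-prefixed IDs found in the
    # chart's watchlist-ribbon markup.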

    def get_imdb_ids(self, method, data, language):
        if method == "imdb_id":
            logger.info(f"Processing IMDb ID: {data}")
            return [(data, "imdb")]
        elif method == "imdb_list":
            status = f"{data['limit']} Items at " if data['limit'] > 0 else ""
            logger.info(f"Processing IMDb List: {status}{data['url']}")
            return [(i, "imdb") for i in self._ids_from_url(data["url"], language, data["limit"])]
        elif method == "imdb_chart":
            logger.info(f"Processing IMDb Chart: {charts[data]}")
            return [(_i, "imdb") for _i in self._ids_from_chart(data)]
        else:
            raise Failed(f"IMDb Error: Method {method} not supported")