from bs4 import BeautifulSoup
from cum import config, exceptions
from cum.scrapers.base import BaseChapter, BaseSeries, download_pool
from functools import partial
import concurrent.futures
import re
import requests


class MangahereSeries(BaseSeries):
    url_re = re.compile(r'https?://((www|m)\.)?mangahere\.cc/manga/.+')
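    # matches both the desktop (www.) and mobile (m.) series pages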

    def __init__(self, url, **kwargs):
        super().__init__(url, **kwargs)
        # convert a mobile (m.) link to its desktop equivalent
        spage = requests.get(url.replace("m.mangahere", "www.mangahere"))
        if spage.status_code == 404:
            raise exceptions.ScrapingError
        self.soup = BeautifulSoup(spage.text, config.get().html_parser)
        self.chapters = self.get_chapters()

    def get_chapters(self):
        try:
            rows = self.soup.find("ul", class_="detail-main-list")\
                .find_all("li")
        except AttributeError:
            raise exceptions.ScrapingError()
        chapters = []
        for row in rows:
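            # chapter links look like
            # /manga/<series>[/v<NN>]/c<NNN[.N]>/<page>.html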
            chap_num = re.match((r"/manga/[^/]+((/v[0-9]+)?"
                                 r"/c[0-9\.]+)/[0-9]+\.html$"),
                                row.find("a")["href"]).groups()[0]\
                .replace("/", "")
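            # flatten the matched slug into a decimal chapter number,
            # e.g. "v02c012" -> "2.012", "c045" -> "45", "c000" -> "0"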
            if "v" in chap_num:
                chap_num = chap_num.replace("v", "").replace("c", ".")
            else:
                chap_num = chap_num.replace("c", "")
            if chap_num == "000":
                chap_num = "0"
            else:
                chap_num = chap_num.lstrip("0")
            # convert mobile link to desktop
            chap_url = "https://www.mangahere.cc" + \
                row.find("a")["href"].replace("/roll_manga/", "/manga/")
            chap_name = row.find("p", class_="title3").text
            chap_date = row.find("p", class_="title2").text
            result = MangahereChapter(name=self.name,
                                      alias=self.alias,
                                      chapter=chap_num,
                                      url=chap_url,
                                      title=chap_name,
                                      groups=[],
                                      upload_date=chap_date)
            chapters.append(result)
        return chapters

    @property
    def name(self):
        try:
            return re.match(r".+ - Read (.+) Online at MangaHere$",
                            self.soup.find("title").text).groups()[0]
        except AttributeError:
            raise exceptions.ScrapingError


class MangahereChapter(BaseChapter):
    url_re = re.compile((r'https?://((www|m)\.)?mangahere\.cc/(roll_)?'
                         r'manga/[^/]+(/v[0-9]+)?/c[0-9\.]+/[0-9]+\.html$'))
    upload_date = None
    uses_pages = True
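    # chapters are assembled from individually downloaded page images
    # (see download below)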

    def download(self):
        if not getattr(self, "cpage", None):
            # fetch the mobile "roll" reader, which serves the whole
            # chapter as a single page
            self.cpage = requests.get(self.url.replace("www.", "m.")
                                      .replace("/manga/", "/roll_manga/"))
        if not getattr(self, "soup", None):
            self.soup = BeautifulSoup(self.cpage.text,
                                      config.get().html_parser)

        image_list = self.soup.find("div", class_="mangaread-img")\
            .find_all("img")
        pages = []
        for image in image_list:
            # images are lazy-loaded; the real URL sits in data-original
            pages.append(image["data-original"].replace("http://",
                                                        "https://"))

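        # each page is fetched here and handed to the shared download_pool;
        # finished pages land in files[] in page order via the
        # page_download_finish callback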
        futures = []
        files = [None] * len(pages)
        req_session = requests.Session()
        with self.progress_bar(pages) as bar:
            for i, page in enumerate(pages):
                # retry transient connection errors, up to ten attempts
                r = None
                retries = 0
                while retries < 10:
                    try:
                        r = req_session.get(page, stream=True)
                        break
                    except requests.exceptions.ConnectionError:
                        retries += 1
                # give up if the page never downloaded or the server
                # responded with an error
                if r is None or r.status_code != 200:
                    if r is not None:
                        r.close()
                    raise ValueError
                fut = download_pool.submit(self.page_download_task, i, r)
                fut.add_done_callback(partial(self.page_download_finish,
                                              bar, files))
                futures.append(fut)
            concurrent.futures.wait(futures)
            self.create_zip(files)

    @staticmethod
    def from_url(url):
        chap_num = re.match((r"https?://((www|m)\.)?mangahere\.cc/(roll_)?"
                             r"manga/[^/]+((/v[0-9]+)?/c[0-9\.]+)"
                             r"/[0-9]+\.html"), url)\
            .groups()[3].replace("/", "")
        if "v" in chap_num:
            chap_num = chap_num.replace("v", "").replace("c", ".")
        else:
            chap_num = chap_num.replace("c", "")
        if chap_num == "000":
            chap_num = "0"
        else:
            chap_num = chap_num.lstrip("0")
        # rebuild the series URL, then find the matching chapter in a
        # fresh scrape of the series page
        parent_url = re.match((r"(https?://((www|m)\.)?mangahere\.cc/(roll_)?"
                               r"manga/[^/]+)(/v[0-9]+)?/"
                               r"c[0-9\.]+/[0-9]+\.html"),
                              url).groups()[0]
        series = MangahereSeries(parent_url)
        for chapter in series.chapters:
            if chapter.chapter == str(chap_num):
                return chapter
        return None
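
# Minimal usage sketch (hypothetical series URL; assumes cum's global
# config has been initialized, since the scrapers read it for the HTML
# parser and download settings):
#
#     series = MangahereSeries("https://www.mangahere.cc/manga/example_title/")
#     for chapter in series.chapters:
#         chapter.download()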