
Scraping the Manga Ranking of Kings (《国王排名》) with Python


I recently watched an anime on Bilibili, Ranking of Kings (《国王排名》), one of the more popular shows at the moment. Since it only updates one episode a week and has just reached episode eight, I went looking for something already finished to read O(∩_∩)O~. Unfortunately, only the manga is available, and only in a traditional-Chinese edition.

Because this involves decrypting JavaScript, and I'm still not very practiced at scraping data protected this way, I followed an article on CSDN. It's well written, and anyone with a bit of grounding should be able to follow it. Click the link below to jump to it:
Python 爬取漫画以及JS解析 (scraping manga with Python and parsing the JS)
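
The gist of the trick, as I understand it from that article: each chapter page embeds a few signing variables (DM5_CID, DM5_MID, DM5_VIEWSIGN, DM5_VIEWSIGN_DT); you pass them to that chapter's chapterfun.ashx endpoint, and the response is an obfuscated snippet of JavaScript which, once evaluated, yields the real image URLs. A minimal sketch of just that step (the chapter URL below is a hypothetical placeholder; execjs comes from the PyExecJS package and needs a JavaScript runtime such as Node.js on the machine):

import re
import requests
import execjs

chapter_url = 'http://www.dm5.com/m1000000/'  # hypothetical chapter URL, replace with a real one
html = requests.get(chapter_url).text

# The chapter page exposes the signing parameters as JS variables
cid = re.search(r'var DM5_CID=(.*?);', html).group(1).strip()
mid = re.search(r'var DM5_MID=(.*?);', html).group(1).strip()
dt = re.search(r'var DM5_VIEWSIGN_DT="(.*?)";', html).group(1)
sign = re.search(r'var DM5_VIEWSIGN="(.*?)";', html).group(1)

# chapterfun.ashx answers with obfuscated JS; evaluating it returns a list of image URLs
api = (f'{chapter_url}chapterfun.ashx?cid={cid}&page=1&key=&language=1&gtk=6'
       f'&_cid={cid}&_mid={mid}&_dt={dt}&_sign={sign}')
js_code = requests.get(api, headers={'referer': 'http://www.dm5.com'}).text
print(execjs.eval(js_code)[0])  # first entry is the current page's image

The full script below does the same thing chapter by chapter and page by page.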

The code below follows the JS-decryption approach from the article linked above and is tailored to downloading the Ranking of Kings manga. For reference only!

from reportlab.lib.pagesizes import portrait
from reportlab.pdfgen import canvas
from PIL import Image
from pyquery import PyQuery
import requests
import execjs
import glob
import re
import os


headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
}
url = 'http://www.dm5.com/manhua-guowangpaiming/'


def getOne(url):
    """获取漫画章节"""
    url_One = []
    html = PyQuery(requests.get(url, headers=headers).content.decode('utf-8'))
    id_url = html("#detail-list-select-1 li a")
    for i in id_url.items():
        url_One.append([i.text(), 'http://www.dm5.com' + i.attr("href")])
    url_One.reverse()
    return url_One


def getTwo(chapters_url):
    """解析漫画"""
    pathOne = os.getcwd() + r'\国王排名'
    if not os.path.exists(pathOne):
        os.mkdir(pathOne)
    # Extract the parameters the image API needs from each chapter page
    for chapter_name, chapter_url in chapters_url:
        print(f"开始下载 >> {chapter_name} << ")
        pathTwo = pathOne + '\\' + chapter_name
        if not os.path.exists(pathTwo):
            os.mkdir(pathTwo)
        response = requests.get(chapter_url)
        print(chapter_url)
        text = response.text
        cid = re.findall('var DM5_CID=(.*?);', text)[0].strip()
        mid = re.findall('var DM5_MID=(.*?);', text)[0].strip()
        dt = re.findall('var DM5_VIEWSIGN_DT="(.*?)";', text)[0].strip()
        sign = re.findall('var DM5_VIEWSIGN="(.*?)";', text)[0].strip()
        page_count = int(re.findall('var DM5_IMAGE_COUNT=(.*?);', text)[0].strip())
        # print(cid, mid, dt, sign, page_count)
        page = 1
        while page <= page_count:
            js_api = f'{chapter_url}chapterfun.ashx?cid={cid}&page={page}&key=&language=1&gtk=6&_cid={cid}&_mid={mid}&_dt={dt}&_sign={sign}'
            ret = requests.get(js_api, headers={'referer': 'http://www.dm5.com'})
            js_code = ret.text
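            # The response is obfuscated JS; evaluating it yields a list of image URLs,
            # and the first entry is the current page's image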
            image_url = execjs.eval(js_code)
            img_url = image_url[0]
            try:
                with open(f'{pathTwo}\\{page}.jpg', 'wb') as f:
                    f.write(requests.get(img_url).content)
                print(f"下载 {chapter_name} {page}.jpg......")
            except Exception as e:
                print(f'{chapter_name} {page}下载失败:{e}')
            page += 1
        jpg_path = glob.glob(os.path.join(pathTwo, '*.jpg'))
        # Sort numerically (1.jpg, 2.jpg, ...) so the PDF pages come out in reading order
        jpg_path.sort(key=lambda x: int(os.path.basename(x).split('.')[0]))
        pdf_path = f'国王排名 {os.path.split(pathTwo)[1]}.pdf'
        jpg_to_pdf(jpg_path, pdf_path)


def jpg_to_pdf(jpgs, path):
    """生成PDF文件"""
    w, h = Image.open(jpgs[0]).size
    ca = canvas.Canvas(path, pagesize=portrait((w, h)))
    for jpg in jpgs:
        ca.drawImage(jpg, 0, 0, w, h)
        ca.showPage()
    ca.save()
    print(path + ' >> saved as PDF')


def main():
    urls_one = getOne(url)
    getTwo(urls_one)


if __name__ == '__main__':
    main()
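
As a side note: if you would rather not pull in reportlab just for the last step, Pillow can also write a multi-page PDF on its own. A minimal sketch, assuming the page images are already on disk (jpg_to_pdf_pillow is a hypothetical drop-in replacement for jpg_to_pdf above):

from PIL import Image


def jpg_to_pdf_pillow(jpgs, path):
    """Merge already-downloaded page images into one PDF using only Pillow."""
    pages = [Image.open(p).convert('RGB') for p in jpgs]  # PDF pages must be RGB
    pages[0].save(path, save_all=True, append_images=pages[1:])

Unlike the reportlab version, which draws every page at the first image's dimensions, this keeps each page at its own size.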

Source: https://blog.csdn.net/m0_51460728/article/details/122267361