
Wallhaven.cc | Super High-Definition, Beautiful Wallpapers

The script below uses asyncio and aiohttp to scrape the Wallhaven toplist concurrently and save the full-size images, backing off whenever the site answers HTTP 429.


import time
from pyquery import PyQuery
import os
import asyncio
import aiohttp
import warnings


# Collect every image detail-page link from one toplist page
async def url_pages(page):
    async with semaphore:
        _url = 'https://wallhaven.cc/toplist?page={}'.format(page)
        async with session.get(_url) as response:
            result = await response.text()
            status = response.status
            # Wallhaven rate-limits aggressively; back off 2 s and retry on HTTP 429
            while status == 429:
                await asyncio.sleep(2)
                async with session.get(_url) as resp:
                    result = await resp.text()
                    status = resp.status
            for _item in PyQuery(result)('.thumb-listing-page li').items():
                async with lock:
                    img_url.append(_item('.preview').attr('href'))


# Get the image's direct link and file name from its detail page
async def get_img_link(_url):
    async with session.get(_url) as response:
        html = await response.text()
        status = response.status
        while status == 429:
            await asyncio.sleep(2)
            async with session.get(_url) as resp:
                html = await resp.text()
                status = resp.status
        _url = PyQuery(html)('#wallpaper').attr('src')
        img_name = os.path.basename(_url)
        return _url, img_name
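
Both url_pages and get_img_link repeat the same retry-on-429 loop. It could be factored into a small shared helper; the sketch below is an assumption, not part of the original script (fetch_text and the max_retries cap are mine), and it reuses the module-level session just like the code above:

async def fetch_text(url, max_retries=10):
    # Keep retrying while Wallhaven answers 429 (Too Many Requests),
    # sleeping 2 seconds between attempts like the inline loops above
    for _ in range(max_retries):
        async with session.get(url) as response:
            if response.status != 429:
                return await response.text()
        await asyncio.sleep(2)
    raise RuntimeError("still rate-limited after {} tries: {}".format(max_retries, url))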

# Download the images queued in img_url
async def img_save():
    async with semaphore:
        # Give up after three consecutive empty checks, i.e. once the
        # listing-page producers have stopped feeding new links
        flag = 3
        while flag:
            if not len(img_url):
                await asyncio.sleep(5)
                flag = flag - 1
                continue
            _url = img_url.pop(0)
            url, name = await get_img_link(_url)
            if os.path.exists(name):
                continue  # already on disk from an earlier run; don't count it
            global number
            number = number + 1
            print("Downloading image {} --> {}".format(number, name))
            async with session.get(url) as res:
                with open(name, 'wb') as f:
                    f.write(await res.read())
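
img_save polls the shared img_url list and gives up after three empty checks. An asyncio.Queue would avoid the polling, since consumers simply await the next item. This is a minimal, self-contained sketch of that producer/consumer shape, not a drop-in replacement (the placeholder links and the None sentinel are illustrative assumptions):

import asyncio

async def consumer(queue):
    # Block until a link arrives; a None sentinel means no more work
    while True:
        link = await queue.get()
        if link is None:
            break
        print("would download", link)  # the real download logic goes here

async def main():
    queue = asyncio.Queue()
    workers = [asyncio.create_task(consumer(queue)) for _ in range(3)]
    for link in ("https://wallhaven.cc/w/aaa", "https://wallhaven.cc/w/bbb"):  # placeholder links
        await queue.put(link)
    for _ in workers:
        await queue.put(None)  # one sentinel per worker so every consumer exits
    await asyncio.gather(*workers)

asyncio.run(main())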


# Main coroutine: share one ClientSession across all tasks
async def scrape_main():
    global session
    session = aiohttp.ClientSession()
    # 20 downloader tasks plus one producer task per listing page
    scrape_index_tasks = [asyncio.ensure_future(img_save()) for _ in range(20)]
    scrape_index_tasks.extend([asyncio.ensure_future(url_pages(page)) for page in range(1, pages + 1)])
    await asyncio.wait(scrape_index_tasks)
    await session.close()
    print("任务爬取结束O(∩_∩)O~  共爬取{}张图片".format(number))


if __name__ == '__main__':
    # Suppress DeprecationWarnings from older asyncio idioms on newer Python
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    semaphore = asyncio.Semaphore(30)  # cap concurrent requests at 30
    start_time = time.time()
    save_path = os.path.join(os.getcwd(), "wallpaper")  # "\w" is an invalid escape sequence; os.path.join is portable
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    os.chdir(save_path)

    number = 0
    img_url = []
    session = None
    lock = asyncio.Lock()

    # Number of toplist pages to scrape: the site serves at most 201 pages
    # of 24 images each (about 4,800 images in total)
    pages = 3

    asyncio.get_event_loop().run_until_complete(scrape_main())
    print(f"累计耗时{time.time()-start_time:.2f}s")

Source: https://www.cnblogs.com/echohye/p/16054292.html