
Scraping the Douban Top 250 with Python


This script scrapes the movie information from the Douban Top 250 list with requests and lxml, writes the results to a CSV file, and uses a thread pool to fetch the pages concurrently.
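The Top 250 list is split across ten pages of 25 films each, selected by the start query parameter (start=0, 25, ..., 225), so the full set of page URLs can be built up front. A minimal sketch of that pattern, using a hypothetical page_urls list:

# The ten Top 250 pages, 25 movies per page, addressed by the start parameter
page_urls = [
    f"https://movie.douban.com/top250?start={start}&filter="
    for start in range(0, 250, 25)
]

The main block of the script below submits exactly these URLs, one per thread-pool task.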

import requests
from lxml import etree
import csv
from concurrent.futures import ThreadPoolExecutor
import time

# Shared CSV output file; newline="" keeps the csv module from inserting blank lines on Windows
f = open("movie_top250.csv", "w", encoding="utf-8", newline="")
csvwriter = csv.writer(f)

def download_one_page(url):
    # Fetch one page of the list, parse its 25 movies and append them to the CSV.
    # The browser-like User-Agent keeps Douban from rejecting the request.
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"
    }
    resp = requests.get(url, headers=header)
    html = etree.HTML(resp.text)
    # The <ol> element that holds this page's 25 movie entries
    table = html.xpath("/html/body/div[3]/div[1]/div/div[1]/ol")[0]
    lis = table.xpath("./li")
    # Iterate over the entries and write each movie's fields to the CSV file
    for li in lis:
        # Title, director/cast/year text, detail-page link, and rating/vote-count spans
        name = li.xpath("./div/div[2]/div[@class='hd']/a/span[1]/text()")
        info = li.xpath("./div/div[2]/div[@class='bd']/p/text()")
        href = li.xpath("./div/div[2]/div[@class='hd']/a/@href")
        num = li.xpath("./div/div[2]/div[@class='bd']/div/span/text()")
        txt = name + info + href + num
        # print(txt)
        # Light cleanup: strip newlines and spaces, normalize the separators
        txt = (item.replace("\n", "").replace(" ", "").replace("\xa0\xa0\xa0", ",").
                   replace("/...", "").replace("\xa0/\xa0", "_") for item in txt)

        # Write the cleaned row to the CSV file
        csvwriter.writerow(txt)
        time.sleep(0.5)
    print(url, "done")
    resp.close()

if __name__ == '__main__':
    # Create a thread pool and submit one download task per page
    # (ten pages of 25 movies each, addressed by start=0, 25, ..., 225)
    with ThreadPoolExecutor(10) as t:
        for start in range(0, 250, 25):
            t.submit(download_one_page, f"https://movie.douban.com/top250?start={start}&filter=")
    f.close()
    print("All pages fetched!")

Source: https://blog.csdn.net/weixin_46363820/article/details/120372772