其他分享
首页 > 其他分享 > 爬虫-获取豆瓣Top250信息

爬虫-获取豆瓣Top250信息

作者:互联网

import csv
import time

import requests
from lxml import etree

# Scrape the Douban Top250 movie list (name, rating, vote count, year,
# country, genre, detail URL) into 250.csv, one row per movie.
#
# Top250 is paginated 25 movies per page, with `start` = 0, 25, ..., 225.
# The original range(0, 275, 25) requested an 11th, empty page (start=250).
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
}

# newline='' is required by the csv module; encoding='utf-8' keeps the
# Chinese titles intact regardless of the platform's locale default.
with open('250.csv', 'a', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)  # handles quoting — titles may contain commas
    for page_no, start in enumerate(range(0, 250, 25), start=1):
        url = f'https://movie.douban.com/top250?start={start}&filter='
        res = requests.get(url, headers=HEADERS)
        res.raise_for_status()  # fail loudly instead of parsing an error page
        res.encoding = res.apparent_encoding
        tree = etree.HTML(res.text)
        # One <div class="item"> per movie; div[2] is the info column.
        movies = tree.xpath('//*[@id="content"]//*[@class="item"]/div[2]')
        print(f'正在抓取第{page_no}页')
        for movie in movies:
            name = movie.xpath('./div[1]/a/span[1]/text()')[0]
            score = movie.xpath('./div[2]/div[@class="star"]/span[2]/text()')[0]
            votes = movie.xpath('./div[2]/div[@class="star"]/span[4]/text()')[0]
            # Second text node of <p> looks like "1994 / 美国 / 犯罪 剧情".
            info = movie.xpath('./div[2]/p/text()[2]')[0]
            year = info.split()[0]
            country = info.split('/')[1].strip()
            genre = info.split('/')[2].strip()  # avoid shadowing builtin `type`
            link = movie.xpath('./div[1]/a/@href')[0]
            writer.writerow([name, score, votes, year, country, genre, link])
        # Politeness delay once per page, not per row as before.
        time.sleep(0.1)

 

效果展示:

标签:xpath,text,爬虫,item,豆瓣,res,div,Top250,page
来源: https://www.cnblogs.com/mliu/p/16606883.html