Scraping NetEase News with the Scrapy framework
Requirements
Scrape the news data (titles and content) from NetEase News:
1. Parse the detail-page URLs of the five major sections from the NetEase News homepage (the homepage itself is not dynamically loaded).
2. The news titles within each section are dynamically loaded.
3. Parse each news item's detail-page URL, fetch that page's source, and parse out the news content.
Code Implementation
Data Parsing
We need the detail-page addresses of two of those sections, 国内 (domestic) and 国际 (international). Both live in li tags under the same ul.
XPath: //*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li
They are the 3rd and 4th li tags in that list, i.e. indexes 2 and 3.
import scrapy
from selenium import webdriver

from wangyiPro.items import WangyiproItem


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/']
    url_list = []  # stores the detail-page URL of each target section

    # Instantiate a single browser object for the whole crawl
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bro = webdriver.Chrome()

    # Parse the homepage for the detail-page URL of each target section
    def parse(self, response):
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        index_list = [2, 3]  # the 3rd and 4th li tags: 国内 and 国际
        for index in index_list:
            url = li_list[index].xpath('./a/@href').extract_first()
            self.url_list.append(url)
        for url in self.url_list:
            yield scrapy.Request(url, callback=self.parse_model)

    # Parse each section page for news titles and detail-page URLs.
    # The news lists are dynamically loaded, so the response handled here
    # is the one rebuilt by the downloader middleware below.
    def parse_model(self, response):
        div_list = response.xpath('/html/body/div/div[3]/div[4]/div[1]/div[1]/div/ul/li/div/div')
        for div in div_list:
            title = div.xpath('./div/div/h3/a/text()').extract_first()
            detail_url = div.xpath('./div/div/h3/a/@href').extract_first()
            item = WangyiproItem()
            item['title'] = title
            # Request the news detail page, passing the item along in meta
            yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={'item': item})

    # Parse the news content from the detail page
    def parse_detail(self, response):
        content = response.xpath('//div[@id="content"]/div[2]//text()').extract()
        content = ''.join(content)
        item = response.meta['item']
        item['content'] = content
        yield item

    # Called once when the spider closes: shut down the browser.
    # (Scrapy passes the close reason, not the spider, to this hook.)
    def closed(self, reason):
        self.bro.quit()
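The spider imports WangyiproItem from wangyiPro.items. A minimal items.py matching the two fields the spider assigns (title and content) would look like this:

# wangyiPro/items.py
import scrapy


class WangyiproItem(scrapy.Item):
    title = scrapy.Field()    # news title, set in parse_model
    content = scrapy.Field()  # news body text, set in parse_detail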
Middleware Handling
from time import sleep

from scrapy.http import HtmlResponse


class WangyiproDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    # This method intercepts the responses of the section pages and replaces
    # them, because their news lists are dynamically loaded.
    def process_response(self, request, response, spider):
        bro = spider.bro  # the browser object defined on the spider class
        # Pick out the responses that need replacing:
        # the url identifies the request, and the request identifies the response.
        if request.url in spider.url_list:
            bro.get(request.url)  # load the section page in the real browser
            sleep(3)  # wait for the dynamic content to render
            page_text = bro.page_source  # now contains the dynamically loaded news data
            # Build a new response object containing the dynamically loaded
            # news data (fetched conveniently via selenium) and return it in
            # place of the original response.
            new_response = HtmlResponse(url=request.url, body=page_text, encoding='utf-8', request=request)
            return new_response
        else:
            # All other responses pass through unchanged
            return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
Configuration File
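For the crawl above to run, settings.py must at least enable the downloader middleware and an item pipeline. A minimal sketch of the relevant entries, assuming the default wangyiPro project layout and a pipeline class named WangyiproPipeline (the name Scrapy's project template generates; see the next section):

# wangyiPro/settings.py (relevant entries only)
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
ROBOTSTXT_OBEY = False  # the site's robots.txt would otherwise block the crawl
LOG_LEVEL = 'ERROR'     # keep the console output readable

DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}
ITEM_PIPELINES = {
    'wangyiPro.pipelines.WangyiproPipeline': 300,
}

With these in place, the crawl is started with: scrapy crawl wangyi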
Persistent Storage
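A minimal pipeline sketch that persists each item to a local text file; the class name WangyiproPipeline and the output filename are assumptions, not part of the original post:

# wangyiPro/pipelines.py
class WangyiproPipeline:
    fp = None

    # Open the output file once, when the spider starts
    def open_spider(self, spider):
        self.fp = open('./wangyi_news.txt', 'w', encoding='utf-8')

    # Write one record per item: the title followed by the content
    def process_item(self, item, spider):
        self.fp.write('%s\n%s\n\n' % (item['title'], item['content']))
        return item

    # Close the file when the spider finishes
    def close_spider(self, spider):
        self.fp.close()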