Classic Crawler Series (4): Scraping Douyu Live-Room Information with Selenium
This case study scrapes room information from the Douyu live-streaming platform across multiple pages, using Selenium in two different roles:
- Selenium handles both data extraction and page navigation
- Selenium handles page navigation only; the lxml parsing library handles data extraction
1. Selenium handles both data extraction and page navigation
# coding=utf-8
from selenium import webdriver
import time


class DouyuSpider:
    def __init__(self):
        self.start_url = "https://www.douyu.com/directory/all"
        self.driver = webdriver.Chrome()

    def get_content_list(self):
        # Selenium 3-style lookups; under Selenium 4 these become
        # driver.find_elements(By.XPATH, ...) with By imported from
        # selenium.webdriver.common.by.
        li_list = self.driver.find_elements_by_xpath("//ul[@class='layout-Cover-list']/li")
        content_list = []
        for li in li_list:
            item = {}
            item["room_img"] = li.find_element_by_xpath(".//div[contains(@class,'LazyLoad')]/img").get_attribute("src")
            item["room_title"] = li.find_element_by_xpath(".//h3").get_attribute("title")
            item["room_cate"] = li.find_element_by_xpath(".//span[contains(@class,'DyListCover-zone')]").text
            item["anchor_name"] = li.find_element_by_xpath(".//h2[@class='DyListCover-user']").text
            item["watch_num"] = li.find_element_by_xpath(".//span[@class='DyListCover-hot']").text
            print(item)
            content_list.append(item)
        # Locate the "next page" element: of the two custom pagination items
        # (prev/next), index 1 is "next". Its parent's aria-disabled attribute
        # is "true" on the last page, so return None there to stop the loop.
        next_url = self.driver.find_elements_by_xpath("//span[@class='dy-Pagination-item-custom']")
        next_url = next_url[1] if self.driver.find_elements_by_xpath("//span[@class='dy-Pagination-item-custom']/..")[1].get_attribute("aria-disabled") == "false" else None
        return content_list, next_url

    def save_content_list(self, content_list):
        pass  # saving the data is left to the reader

    def run(self):  # main logic
        # 1. start_url
        # 2. send the request, get the response
        self.driver.get(self.start_url)
        # 3. extract the data and the "next page" element
        content_list, next_url = self.get_content_list()
        # 4. save the data
        self.save_content_list(content_list)
        # 5. click the "next page" element and loop
        while next_url is not None:
            next_url.click()  # go to the next page
            print("click succeeded")
            time.sleep(3)  # extraction would otherwise fire immediately, so wait for the page to load
            content_list, next_url = self.get_content_list()
            self.save_content_list(content_list)


if __name__ == '__main__':
    douyu = DouyuSpider()
    douyu.run()
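A fragile spot in the loop above is the fixed time.sleep(3) after each click: if the page takes longer to render, get_content_list will see stale or incomplete data. A more robust alternative is an explicit wait. Below is a minimal sketch using Selenium's standard WebDriverWait and expected_conditions helpers; wait_for_room_list and the 10-second timeout are my own choices, not from the original article:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_room_list(driver, timeout=10):
    # Block until at least one room <li> is present in the DOM,
    # instead of sleeping for a fixed number of seconds.
    WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located(
            (By.XPATH, "//ul[@class='layout-Cover-list']/li")
        )
    )

In run(), the time.sleep(3) line could then be replaced by wait_for_room_list(self.driver).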
- The data-saving part is left to the reader; any output format will do. One minimal sketch follows below.
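As one possibility, here is a sketch of save_content_list that can be dropped into the class and writes one JSON object per line; the douyu_rooms.jsonl filename is an arbitrary choice:

import json

def save_content_list(self, content_list):
    # Append each room record as one JSON object per line (JSON Lines).
    # ensure_ascii=False keeps Chinese titles readable in the file.
    with open("douyu_rooms.jsonl", "a", encoding="utf-8") as f:
        for item in content_list:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")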
2. Selenium handles page navigation; the lxml parsing library handles data extraction
# coding=utf-8
from selenium import webdriver
import time
from lxml import etree


class DouyuSpider:
    def __init__(self):
        self.start_url = "https://www.douyu.com/directory/all"
        self.driver = webdriver.Chrome()

    def get_content_list(self):
        # Hand the rendered page source to lxml and extract with XPath.
        html_data = self.driver.page_source
        data = etree.HTML(html_data)
        li_list = data.xpath("//ul[@class='layout-Cover-list']/li")
        content_list = []
        for li in li_list:
            item = {}
            # Note: lxml's xpath() returns a list for every query.
            item["room_img"] = li.xpath(".//div[contains(@class,'LazyLoad')]/img/@src")
            item["room_title"] = li.xpath(".//h3/@title")
            item["room_cate"] = li.xpath(".//span[contains(@class,'DyListCover-zone')]/text()")
            item["anchor_name"] = li.xpath(".//h2[@class='DyListCover-user']/text()")
            item["watch_num"] = li.xpath(".//span[@class='DyListCover-hot']/text()")
            print(item)
            content_list.append(item)
        # Clicking still needs a live WebElement, so the "next page" element
        # is located through Selenium exactly as in the first version.
        next_url = self.driver.find_elements_by_xpath("//span[@class='dy-Pagination-item-custom']")
        next_url = next_url[1] if self.driver.find_elements_by_xpath("//span[@class='dy-Pagination-item-custom']/..")[1].get_attribute("aria-disabled") == "false" else None
        return content_list, next_url

    def save_content_list(self, content_list):
        pass  # saving the data is left to the reader

    def run(self):  # main logic
        # 1. start_url
        # 2. send the request, get the response
        self.driver.get(self.start_url)
        # 3. extract the data and the "next page" element
        content_list, next_url = self.get_content_list()
        # 4. save the data
        self.save_content_list(content_list)
        # 5. click the "next page" element and loop
        while next_url is not None:
            next_url.click()  # go to the next page
            print("click succeeded")
            time.sleep(3)  # extraction would otherwise fire immediately, so wait for the page to load
            content_list, next_url = self.get_content_list()
            self.save_content_list(content_list)


if __name__ == '__main__':
    douyu = DouyuSpider()
    douyu.run()
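One difference from the first version: lxml's xpath() always returns a list, so every field in item above is a list (possibly empty) rather than a string. If single values are wanted, each field can be unwrapped before saving. A minimal sketch, with a hypothetical first_or_none helper of my own:

def first_or_none(values):
    # xpath() returns a list of matches; take the first, or None if empty.
    return values[0] if values else None

# For example, inside get_content_list:
# item["room_title"] = first_or_none(li.xpath(".//h3/@title"))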
Source: https://blog.csdn.net/hot7732788/article/details/89039741