"""Small LaGou (lagou.com) job-listing spider.

_author: Jolly
date: 2019/9/8
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from lxml import etree
import time
import csv
class Lagouspider(object):
    """Selenium-driven crawler for python job listings on lagou.com.

    Walks every list page, opens each job's detail page in a new tab,
    extracts (title, description) and writes one CSV row per job.
    """

    def __init__(self, writer_info):
        """Store the csv.writer and start a Chrome driver.

        :param writer_info: csv.writer used to persist (title, content) rows.
        """
        # Fixed: was `def init`, so the constructor never ran and
        # `Lagouspider(writer_info)` raised TypeError.
        self.writer_info = writer_info
        self.list_page_url = 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput='
        self.driver_path = r'D:\xuexiruanjian\chromedriver\chromedriver.exe'
        self.driver = webdriver.Chrome(executable_path=self.driver_path)

    def run(self):
        """Fetch each list page's source and advance until the last page."""
        self.driver.get(self.list_page_url)  # open the first list page
        while True:
            list_page_source = self.driver.page_source  # current list page HTML
            self.parse_list_page(list_page_source)
            # "Next page" control; note the trailing space in the class name
            # is present in lagou's markup.
            span_tag = self.driver.find_element_by_xpath(
                '//div[@class="pager_container"]//span[@class="pager_next "]')
            # On the last page the control gains the "disabled" class.
            if "pager_next pager_next_disabled" in span_tag.get_attribute('class'):
                break
            span_tag.click()  # advance to the next list page
            time.sleep(2)  # crude rate limit; let the page load
            print('==========下一页========', end='\n\n')

    def parse_list_page(self, list_page_source):
        """Extract every detail-page URL from one list page and visit each."""
        htmlelement = etree.HTML(list_page_source)
        links = htmlelement.xpath('//a[@class="position_link"]/@href')
        for link in links:
            self.request_detail_page(link)
            time.sleep(1)  # throttle between detail pages

    def request_detail_page(self, link):
        """Open a detail page in a new tab, parse it, then return to the list tab."""
        self.driver.execute_script("window.open('%s')" % link)  # open in new tab
        self.driver.switch_to.window(self.driver.window_handles[1])
        # Fixed: wait for the job-detail element BEFORE reading page_source;
        # the original read the source first, so a slow page could be parsed
        # half-loaded and yield empty fields.
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located((By.XPATH, '//div[@class="job-detail"]'))
        )
        detail_page_source = self.driver.page_source
        self.parse_detail_page(detail_page_source)
        time.sleep(1)
        self.driver.close()  # close the detail tab
        self.driver.switch_to.window(self.driver.window_handles[0])  # back to list tab

    def parse_detail_page(self, detail_page_source):
        """Extract the job title and full description and write one CSV row."""
        htmlelement = etree.HTML(detail_page_source)
        title = "".join(htmlelement.xpath('//div[@class="ceil-left"]/span[@class="ceil-job"]/text()')).strip()
        content = "".join(htmlelement.xpath('//div[@class="job-detail"]//text()')).strip()
        self.writer_info.writerow((title, content))  # persist to CSV
        print(title, content, sep="\n")
        print('-----'*10)
# def save_document(self, title, content):
def main():
    """Create the CSV output file, write the header, and run the spider."""
    # Fixed: the file handle was never closed — use a context manager.
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open('lagouinfo.csv', 'a', encoding='utf-8', newline='') as fp:
        writer_info = csv.writer(fp)
        writer_info.writerow(('position', 'introduce'))  # CSV header row
        spider = Lagouspider(writer_info)
        spider.run()
# Fixed: the dunder underscores were lost — `name`/'main' raised NameError.
if __name__ == '__main__':
    main()
# 标签 (tags): LouGou, self, 小爬虫, driver, detail, list, page, 页面
# 来源 (source): https://www.cnblogs.com/Jolly-hu/p/12227291.html