编程语言
首页 > 编程语言 > Python爬虫之利用requests,BeautifulSoup爬取小说标题、章节

Python爬虫之利用requests,BeautifulSoup爬取小说标题、章节

作者:互联网

import json

import requests
from bs4 import BeautifulSoup

# Fetch the novel's table-of-contents page.
# A desktop User-Agent avoids the site's trivial bot filtering.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'}
response = requests.get('https://quanxiaoshuo.com/177913/', headers=headers)
response.encoding = 'utf-8'  # force correct decode in case the server omits charset

# response.text is already str, so from_encoding must NOT be passed
# (BeautifulSoup ignores it for unicode input and emits a warning).
soup = BeautifulSoup(response.text, 'html.parser')  # html.parser or lxml

# Collect volume titles: each volume heading sits in a <b> inside class="volumn".
title = []
for volumn in soup.find_all(class_="volumn"):
    b = volumn.find('b')
    if b is not None:  # some "volumn" divs have no <b> heading
        title.append({'volumn': b.string})

# Collect chapter titles from the title attribute of each chapter's <a>.
chapters = []
for chapter in soup.find_all(class_='chapter'):
    a = chapter.find('a')
    if a is not None:  # guard against chapter divs without a link
        chapters.append({'chapter_title': a.get('title')})

# Persist as JSON. encoding='utf-8' keeps the file portable across platforms;
# ensure_ascii=False writes Chinese characters readably instead of \uXXXX escapes.
with open('xylz_title.json', 'w', encoding='utf-8') as fp:
    json.dump(title, fp=fp, indent=4, ensure_ascii=False)
with open('xylz_chapters.json', 'w', encoding='utf-8') as fp:
    json.dump(chapters, fp=fp, indent=4, ensure_ascii=False)
import requests
from bs4 import BeautifulSoup
import json

# Fetch the HTML of the novel's table-of-contents page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'}
response = requests.get('https://quanxiaoshuo.com/177913/', headers=headers)
response.encoding = 'utf-8'  # be explicit in case the server omits the charset header

# Parse the page; response.text is already str, so from_encoding would be
# ignored (and warned about) — it must only be used with bytes input.
soup = BeautifulSoup(response.text, 'html.parser')  # html.parser or lxml

# Extract volume titles: each heading is a <b> inside an element with class "volumn".
title = []
for volumn in soup.find_all(class_="volumn"):
    b = volumn.find('b')
    if b is not None:  # PEP 8: compare to None with "is", not "!="
        b_title = b.string  # the volume heading text
        title.append({'volumn': b_title})

# Extract chapter titles from each chapter link's title attribute.
chapters = []
for chapter in soup.find_all(class_='chapter'):
    a = chapter.find('a')
    if a is not None:  # skip malformed chapter entries with no <a>
        chapter_title = a.get('title')
        chapters.append({'chapter_title': chapter_title})

# Store titles and chapters as JSON. Explicit utf-8 avoids mojibake on Windows;
# ensure_ascii=False keeps the Chinese text human-readable in the output files.
with open('xylz_title.json', 'w', encoding='utf-8') as fp:
    json.dump(title, fp=fp, indent=4, ensure_ascii=False)
with open('xylz_chapters.json', 'w', encoding='utf-8') as fp:
    json.dump(chapters, fp=fp, indent=4, ensure_ascii=False)

 

标签:chapter,fp,title,Python,BeautifulSoup,chapters,爬取,json,find
来源: https://blog.csdn.net/zcs2312852665/article/details/120893410