
Scraping a novel with Python

The script below crawls a web novel from its table-of-contents page, downloads every chapter, and saves each one as a UTF-8 .txt file.

from urllib import request
from bs4 import BeautifulSoup
import re,codecs

def download(url, i=0):  # download a page and return its HTML as a string
    # Fetch the HTML, sending a browser-like User-Agent so the site serves the page
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    req = request.Request(url, headers=headers)
    html = request.urlopen(req).read()
    # Keep a local copy of the raw HTML
    with open("{}.html".format(i), "wb") as f:
        f.write(html)
    with open('{}.html'.format(i), 'rb') as f:
        Soup = str(BeautifulSoup(f.read(), 'html.parser'))  # parse the HTML and convert it back to a string
    return Soup
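
Writing the page to disk and immediately reading it back is only needed because the script keeps a local copy of every page. If those copies are not wanted, the same fetch-and-parse step can be sketched without the round trip (the name download_nocache is mine, not the original author's):

def download_nocache(url):  # hypothetical variant: same fetch, no local .html copy
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    req = request.Request(url, headers=headers)
    html = request.urlopen(req).read()
    return str(BeautifulSoup(html, 'html.parser'))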

def save(name, txt):  # given a title and body, save them as a .txt file
    # 'with' guarantees the file is flushed and closed once the text is written
    with codecs.open("{}.txt".format(name), 'w', 'utf-8') as f:
        f.write(txt)
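
For instance, a call like the following (the values are hypothetical) produces a file named chapter-0001.txt encoded as UTF-8:

save('chapter-0001', 'chapter body text')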

def tackle(url, i):
    Soup = download(url, i)  # the whole chapter page as one string
    pattern_title = '<div class="title">.*</div>'  # .* matches any run of characters except newlines
    mp = re.search(pattern_title, Soup)  # regex search for the title line
    # group() returns the matched string; strip the surrounding <div class="title"> and </div>
    title = mp.group()[len('<div class="title">'):-len('</div>')]
    start = Soup.find('<div class="content" id="content">')  # where the body begins
    end = Soup.find('&lt;/p&gt;</div>')  # where the body ends (the page stores escaped </p> entities)
    pattern_body = Soup[start + len('<div class="content" id="content">'):end]  # slice out the body text
    save(title, pattern_body)  # save() already appends .txt
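
The fixed string offsets above break as soon as the markup changes. Since BeautifulSoup is already imported, the same title/body extraction can be sketched against the parse tree instead; this assumes the same class="title" and id="content" markup and is an alternative, not the original author's method:

def tackle_bs(url, i):  # hypothetical variant of tackle() that uses the parse tree
    soup = BeautifulSoup(download(url, i), 'html.parser')
    title_div = soup.find('div', class_='title')  # same markup the regex above relies on
    body_div = soup.find('div', id='content')
    if title_div and body_div:
        save(title_div.get_text(strip=True), body_div.get_text())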

if __name__ == "__main__":
    Soup = download('path')  # 'path' is the URL of the novel's table of contents
    # String matching: find every position where a chapter URL starts
    place = [substr.start() for substr in re.finditer('http://www.ncwxw.cc/0/298/8', Soup)]
    place = place[13:]  # preprocessing: skip the first 13 matches
    website = []  # the URL of each chapter
    for chapter in place:
        website.append(Soup[chapter:chapter + 36])  # each chapter URL is 36 characters long
    '''The above handles chapter URLs that follow no pattern; if they are regular,
       the addresses can be computed directly instead (see the sketch below).'''
    for i in range(1, 1979):  # crawl chapters 1..1978
        tackle(website[i], i)
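
As the docstring above notes, when chapter URLs do follow a pattern the matching pass can be skipped and the list generated directly. A sketch, assuming (hypothetically) that each chapter page is the 27-character prefix plus a 4-digit number and '.html', which is consistent with the 36-character slices taken above; the real suffixes must be read off the actual site:

# Hypothetical direct addressing: verify the real URL scheme before using this.
# Index 0 is a placeholder so that website[i] is chapter i, matching the loop above.
website = ['http://www.ncwxw.cc/0/298/8{:04d}.html'.format(i) for i in range(0, 1979)]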
    

 

Source: https://www.cnblogs.com/chengjunkai/p/15807131.html