Web Crawler: Extracting Baidu Search Results
Author: Internet
Code
import json
import requests
from lxml import etree
import time
from tqdm import tqdm
# keyword to search for ('粮食' means "grain")
word = '粮食'
# base search URL; the pn parameter is the paging offset appended in the loop below
urls = 'https://www.baidu.com/s?wd=' + word + '&pn='
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36"
}
for j in tqdm(range(100)):
    # pn advances 10 results per page, so this walks the first 100 result pages
    url = urls + str(j * 10)
    response = requests.get(url, headers=headers)
    r = response.text
    html = etree.HTML(r, etree.HTMLParser())
    r1 = html.xpath('//h3')                        # result titles
    r2 = html.xpath('//*[@class="c-abstract"]')    # result abstracts
    r3 = html.xpath('//*[@class="t"]/a/@href')     # result links
    for i in range(len(r2)):
        r11 = r1[i].xpath('string(.)')
        r22 = r2[i].xpath('string(.)')
        r33 = r3[i]
        # append three JSON-encoded lines per result: title, abstract, link
        with open('baidu_sousuo1.txt', 'a', encoding='utf-8') as c:
            c.write(json.dumps(r11, ensure_ascii=False) + '\n')
            c.write(json.dumps(r22, ensure_ascii=False) + '\n')
            c.write(json.dumps(r33, ensure_ascii=False) + '\n')
        # print(r11, end='\n')
        # print('------------------------')
        # print(r22, end='\n')
        # print(r33)
    time.sleep(1)  # pause 1 second between pages
Results
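The script appends its output to baidu_sousuo1.txt, writing three JSON-encoded lines per result (title, abstract, link). Below is a minimal sketch for loading that file back into a list of dictionaries for inspection; the grouping into triples assumes the file was written only by the script above.

import json

results = []
with open('baidu_sousuo1.txt', encoding='utf-8') as f:
    # each line is a single JSON-encoded string
    lines = [json.loads(line) for line in f if line.strip()]

# group the flat list of lines into (title, abstract, link) triples
for title, abstract, link in zip(lines[0::3], lines[1::3], lines[2::3]):
    results.append({'title': title, 'abstract': abstract, 'link': link})

print(len(results), 'results loaded')
print(results[0] if results else 'file is empty')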
Source: https://blog.csdn.net/qq_46319397/article/details/122430165