
Web Scraping Practice


import urllib.parse
import urllib.request
import json

content = input("Enter the text to translate: ")
url = "https://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule"
data = {'i': content, 'from': 'AUTO', 'to': 'AUTO', 'smartresult': 'dict', 'client': 'fanyideskweb',
        'salt': '16592445720710', 'sign': '84ffdb9715d2e19c008788f42edb0b94', 'lts': '1659244572071',
        'bv': '75aec829d151e3209d90568c50f6766c', 'doctype': 'json', 'version': '2.1', 'keyfrom': 'fanyi.web',
        'action': 'FY_BY_CLICKBUTTION'}
# data['i'] = 'i am yoki'

data = urllib.parse.urlencode(data).encode('utf-8')  # URL-encode the form fields, then convert the query string to bytes
# encode(): converts a unicode str into a byte string in the given encoding, e.g. str2.encode('utf-8') gives the UTF-8 bytes of str2.
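# Illustration of the two steps above (values are hypothetical, not the real request):
#   urllib.parse.urlencode({'i': 'hello', 'doctype': 'json'})  ->  'i=hello&doctype=json'   (str)
#   'i=hello&doctype=json'.encode('utf-8')                     ->  b'i=hello&doctype=json'  (bytes, what urlopen expects as a POST body)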
# response = urllib.request.urlopen(url, data)  # sending the request directly also works, but carries no browser headers
head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.77'}  # pretend to be a normal browser
req = urllib.request.Request(url, data, head)
"""
通过Request的header参数修改
通过Request.add_header()方法修改
"""
# req = urllib.request.Request(url, data)
# req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
#                              'Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.77')
response = urllib.request.urlopen(req)
html = response.read().decode('utf-8')
# decode(): converts a byte string in some encoding into a unicode str, e.g. str1.decode('utf-8') decodes the UTF-8 bytes str1 into a str.
# To convert one encoding into UTF-8, first decode it to unicode and then re-encode as UTF-8; unicode is the intermediate form.
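# Illustration (the bytes below are simply the UTF-8 encoding of '翻译'; gbk_data is a hypothetical variable holding GBK-encoded bytes):
#   b'\xe7\xbf\xbb\xe8\xaf\x91'.decode('utf-8')   ->  '翻译'        (bytes -> unicode str)
#   gbk_data.decode('gbk').encode('utf-8')        ->  UTF-8 bytes   (re-encode by going through the unicode str)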
print(html)  # the raw response; this request came back as {"errorCode":50}
# Youdao added an anti-scraping check: the salt and sign form fields are validated server-side
# (a sketch of how such values are typically generated follows the listing)

translate_results = json.loads(html)
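# On success the parsed JSON typically looks like the sketch below (values are
# illustrative; only the keys used in the next lines are assumed):
#   {"errorCode": 0, "translateResult": [[{"src": "i am yoki", "tgt": "..."}]]}
# translateResult is a list of paragraphs, each a list of segments with
# 'src'/'tgt' pairs, hence the [0][0]['tgt'] indexing below.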
# Pull the translated text out of the response
translate_results = translate_results['translateResult'][0][0]['tgt']
# Print the translation
print("The translation result is: %s" % translate_results)

 

Source: https://www.cnblogs.com/yuqi-yoki/p/16538780.html