Here's the code:
#************************************************************
# Purpose: translate English names into Chinese via the Baidu Translate API
#************************************************************
import csv
import hashlib
import json
import random
import time
import urllib.parse

import requests
from fake_useragent import UserAgent


class Baidufanyi:
    """Thin wrapper around the Baidu Translate HTTP API."""

    def __init__(self, appid, appSecret, ip_list):
        self.url = 'https://fanyi-api.baidu.com/api/trans/vip/translate'
        self.ip_list = ip_list          # proxy IP list
        self.appid = appid              # application ID
        self.appSecret = appSecret      # application secret
        self.langFrom = 'en'            # source language
        self.langTo = 'zh'              # target language

    def getUrlEncodeData(self, queryText):
        '''
        Purpose: build and URL-encode the request parameters.
        :param queryText: the text to translate
        :return: the URL-encoded query string
        '''
        salt = '2'  # the random salt is fixed to 2 here
        sign_str = self.appid + queryText + salt + self.appSecret
        sign_str = sign_str.encode('utf-8')
        sign = hashlib.md5(sign_str).hexdigest()
        payload = {
            'q': queryText,
            'from': self.langFrom,
            'to': self.langTo,
            'appid': self.appid,
            'salt': salt,
            'sign': sign
        }
        # Note: this is sent as a GET request, not a POST request
        data = urllib.parse.urlencode(payload)
        return data
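    # Worked example of the signing step above (hypothetical credentials):
    # with appid='20200101000000001', queryText='apple', salt='2' and
    # appSecret='secretKey', the string being hashed is
    # '20200101000000001apple2secretKey', and `sign` is its 32-character
    # hexadecimal MD5 digest.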
    def parseHtml(self, html):
        '''
        Parse the API response and print the translation result.
        :param html: the response body returned by the API
        :return: the translated text
        '''
        data = json.loads(html)
        print('-------------------------')
        translationResult = data['trans_result'][0]['dst']
        if isinstance(translationResult, list):
            translationResult = translationResult[0]
        print(translationResult)
        return translationResult

    def get_translateResult(self, queryText):
        data = self.getUrlEncodeData(queryText)     # URL-encoded request parameters
        target_url = self.url + '?' + data          # build the target URL
        print('target_url: ' + target_url)
        headers = {'User-Agent': str(UserAgent().random)}
        try:
            proxies = get_randomIp(self.ip_list)
            req = requests.get(target_url, proxies=proxies, headers=headers, timeout=10)  # send the request
        except Exception:
            print('Request failed, pausing for 20 seconds')
            time.sleep(20)
            proxies = get_randomIp(self.ip_list)
            req = requests.get(target_url, proxies=proxies, headers=headers)  # retry with a new proxy
        req.encoding = 'utf-8'
        html = req.text
        translateResult = self.parseHtml(html)      # parse and display the translation result
        return translateResult


# Read the proxy IP list from IP.txt (no validity check is performed here)
def get_ipList():
    with open('IP.txt', 'r') as f:
        ip_list = f.readlines()
    return ip_list


# Pick a random proxy from the IP list
def get_randomIp(ip_list):
    proxy_ip = random.choice(ip_list)
    proxy_ip = proxy_ip.strip('\n')
    proxies = {'http': proxy_ip}
    return proxies
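
# IP.txt is assumed to contain one proxy per line, e.g. (hypothetical addresses):
#   http://123.45.67.89:8080
#   http://98.76.54.32:3128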
# Purpose: read the file that needs translating
def reader_file(filePath):
    reader = []
    with open(filePath, 'r', encoding='utf-8') as csvfile:
        spanreader = csv.reader(csvfile, delimiter='|', quoting=csv.QUOTE_MINIMAL)
        for row in spanreader:
            if row:
                reader.append(row)
    return reader


# Purpose: append one row to a file
def write_file(filePath, row):
    with open(filePath, 'a+', encoding='utf-8', newline='') as csvfile:
        spanwriter = csv.writer(csvfile, delimiter='|', quoting=csv.QUOTE_MINIMAL)
        spanwriter.writerow(row)


# Main program
def main():
    print('Program started!')
    appid = 'your_appid'            # application ID (replace with your own)
    appSecret = 'your_appSecret'    # application secret (replace with your own)
    filePath = 'baidubaike.csv'     # file that needs translating
    ip_list = get_ipList()
    fanyi = Baidufanyi(appid, appSecret, ip_list)
    reader = reader_file(filePath)
    for row in reader:
        translateResult = ''        # will hold the translation result
        if not row[6]:              # column 7 is empty: no Chinese name yet
            print('Now translating the English name: ' + row[0])
            translateResult = fanyi.get_translateResult(row[0])
            print('Translation result: ' + translateResult)
            row[6] = translateResult
            write_file('baidubaike_notChinese.csv', row)  # rows that were just translated
        else:
            write_file('baidubaike_Chinese.csv', row)     # rows that already had a Chinese name
    print('All rows processed, program finished')


if __name__ == '__main__':
    main()
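
For a quick sanity check of your appid/appSecret pair without the proxy pool and the CSV workflow, a minimal sketch like the one below can be used; the quick_translate helper, the placeholder credentials and the sample text are illustrative, and it sends the same signed GET request as Baidufanyi but without proxies:

import hashlib
import urllib.parse
import requests

def quick_translate(appid, appSecret, text, lang_from='en', lang_to='zh'):
    # Build the same signed GET request as Baidufanyi.getUrlEncodeData, minus proxies.
    salt = '2'
    sign = hashlib.md5((appid + text + salt + appSecret).encode('utf-8')).hexdigest()
    params = {'q': text, 'from': lang_from, 'to': lang_to,
              'appid': appid, 'salt': salt, 'sign': sign}
    url = 'https://fanyi-api.baidu.com/api/trans/vip/translate?' + urllib.parse.urlencode(params)
    resp = requests.get(url, timeout=10)
    resp.encoding = 'utf-8'
    return resp.json()['trans_result'][0]['dst']

# Example call (replace with your own credentials):
# print(quick_translate('your_appid', 'your_appSecret', 'Michael Jordan'))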