Using a Youdao dictionary crawler to translate the docstrings of Python 3 built-in functions and attributes

from urllib import request
from urllib import parse
import json
import time
import random
import hashlib

# if __name__=="__main__":
#     response=request.urlopen("http://fanyi.baidu.com")#获取url信息 ,返回一个类似于文本的对象
#     html=response.read()#调用read()进行读取
#     html=html.decode('utf-8') #把网站里面的二进制的信息展示出来 用我们能看动的utf-8格式
#     print(html)


# if __name__ == '__main__':
#     req = request.Request('http://fanyi.baidu.com')
#     response = request.urlopen(req)  # urlopen() accepts a Request object as well as a plain URL string
#     print(response.geturl())
#     print('*******************')
#     print(response.getcode())
#     print('*************************')
#     print(response.info())


   

def fanyi(content):
    request_url = 'http://fanyi.youdao.com/translate'
    # Build the form data ourselves; it is POSTed to the server as the request payload.
    Form_data = {}
    # content = input('Enter a word: ')
    # if not content:
    #     break

    # Youdao's anti-scraping sign: md5(client + word + salt + secret suffix).
    u = 'fanyideskweb'                                       # client identifier
    d = content                                              # the text to translate
    f = str(int(time.time()*1000) + random.randint(1, 10))   # salt: millisecond timestamp plus a random digit
    c = 'ebSeFb%=XZ%T[KZ)c(sy!'                              # secret suffix taken from Youdao's JS
    sign = hashlib.md5((u + d + f + c).encode('utf-8')).hexdigest()
  
    Form_data['i']=content
    Form_data['from']='AUTO'
    Form_data['to']='AUTO'
    Form_data['smartresult']='dict'
    Form_data['client']='fanyideskweb'
    
    Form_data['doctype']='json'
    Form_data['version']='2.1'
    Form_data['keyfrom']='fanyi.web'
    Form_data['action']='FY_BY_REALTIME'
    Form_data['typoResult']='false'
    Form_data['sign'] = sign
    Form_data['salt'] = f
    # print(sign)
    # print(f)
    head={}
    head['Accept'] = 'application/json, text/javascript, */*; q=0.01'
    # head['Accept-Encoding'] = 'gzip, deflate'
    # Deliberately left out: if we asked for gzip, the server's compressed body would not decode as UTF-8 without decompressing it first.
    head['Accept-Language'] = 'zh-CN,zh;q=0.9'
    head['Connection'] = 'keep-alive'
    head['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
    head['Cookie'] = '[email protected]; JSESSIONID=aaaZP4wtQ7NHu6YzqAuuw; OUTFOX_SEARCH_USER_ID_NCOO=1469627843.6964185; ___rl__test__cookies='+f
    head['Host'] = 'fanyi.youdao.com'
    head['Referer'] = 'http://fanyi.youdao.com/'
    head['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'
    head['X-Requested-With'] = 'XMLHttpRequest'

    # Note the anti-scraping fields: Cookie, salt and sign. The Cookie above ends with
    # ___rl__test__cookies set to the same salt f that goes into the sign.


    data = bytes(parse.urlencode(Form_data), encoding='utf-8')  # url-encode the form and convert it to bytes
    # print(data)
    req = request.Request(request_url, data, head)  # Request object carrying the POST body and headers
    # print(req)
    response = request.urlopen(req)
    # print(response)
    html = response.read().decode('utf-8')  # read and decode the response
    
    # print(html)

    res=json.loads(html)

    # print(res)

    res = res['translateResult'][0][0]['tgt']  # the translated text lives at translateResult[0][0]['tgt']

    return res
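
# A quick smoke test of fanyi(); uncomment to try it. This assumes the Youdao
# endpoint, form fields and sign scheme above are still accepted by the server.
# print(fanyi('hello'))   # should print a Chinese translation, e.g. something like '你好'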

def info(object, collapse=0, space=15):
    # Collect every callable attribute (method) of the object.
    methodList = [method for method in dir(object) if callable(getattr(object, method))]

    # If collapse is truthy, squeeze each docstring onto one line; otherwise return it unchanged.
    processFun = collapse and (lambda s: ' '.join(s.split())) or (lambda s: s)
    # print('\n'.join(['%s %s' % (method.ljust(space), processFun(str(getattr(object, method).__doc__))) for method in methodList]))
    # Print each method name padded to `space` columns, followed by its docstring translated by fanyi().
    print('\n'.join(['%s %s' % (method.ljust(space), fanyi(processFun(str(getattr(object, method).__doc__)))) for method in methodList]))
# info(request)
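
# Example usage (a sketch): every method docstring triggers one HTTP request to
# Youdao, so translating a large module such as urllib.request is slow and
# subject to the service's rate limits.
# info(str, collapse=1)   # prints each str method with a machine-translated docstring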

if __name__ == '__main__':
    while 1:
        name = input('Enter the name of the function to look up: ')
        if not name:
            break
        info(name)  # NOTE: `name` is a string here, not the object itself -- see the comment below

# Known problem: the program cannot tell whether the query names a function/object or is just a
# string. Everything is treated as a string here, so info() ends up listing the methods of str
# instead of the object the user asked about. One possible fix is sketched below.
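
# A minimal sketch of one way to fix this (an assumption about the intended behaviour,
# not part of the original script): resolve the typed name to a real object before
# calling info(). Define the imports and resolve() above the __main__ block, then call
# info(resolve(name)) in the loop.
import builtins
import importlib

def resolve(name):
    """Return the object named by `name`, or None if it cannot be found."""
    if hasattr(builtins, name):                # built-ins such as len, str, dict
        return getattr(builtins, name)
    try:
        return importlib.import_module(name)   # modules such as json or urllib.request
    except ImportError:
        return None

# In the main loop:
#     obj = resolve(name)
#     if obj is not None:
#         info(obj)
#     else:
#         print('Could not find an object named %r' % name)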