Python: Crawler Learning and Teaching (2)

Disguising the crawler

Hide the crawler by setting the request headers on the Request object: either pass the headers as a dict when the Request is created, or call add_header(key, value) on the Request object after it has been created.

The main header to set is:

User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:75.0) Gecko/20100101 Firefox/75.0

For example, the following script sends a POST request to the Youdao Translate API with a custom User-Agent:


import urllib.request
import urllib.parse
import json

content=input('Enter the text to translate: ')
url='http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

head={}
head['User-Agent']='Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:75.0) Gecko/20100101 Firefox/75.0'


data={}
data['i']=content
data['from']='AUTO'
data['to']='AUTO'
data['smartresult']='dict'
data['client']='fanyideskweb'
data['salt']='15890140685391'
data['sign']='483f27800c357ce0e9a60057df27dda1'
data['ts']='1589014068539'
data['bv']='abf85f8020851128b561472c8a7b924d'
data['doctype']='json'
data['version']='2.1'
data['keyfrom']='fanyi.web'
data['action']='FY_BY_CLICKBUTTION'

data=urllib.parse.urlencode(data).encode('utf-8')  # URL-encode the form data and convert it to bytes, so Chinese text is sent correctly

#response=urllib.request.urlopen(url,data)

req=urllib.request.Request(url,data,head)         # build the Request from the url, data and headers
response=urllib.request.urlopen(req)

html=response.read().decode('utf-8')                # decode the bytes returned by the server

#print(html)
target=json.loads(html)

#print(target['translateResult'][0][0]['src'])
print(req.headers)
print('Translation result:',target['translateResult'][0][0]['tgt'])
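
The add_header() route mentioned at the start works the same way; a minimal sketch using the same URL and User-Agent as above:

import urllib.request

req = urllib.request.Request('http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule')
req.add_header('User-Agent',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:75.0) Gecko/20100101 Firefox/75.0')
print(req.headers)    # the User-Agent is now attached to the request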

 

Using a proxy (important)

Steps:
1. The argument is a dict of the form {'type': 'proxy IP:port'}
proxy_support = urllib.request.ProxyHandler({'http': 'proxy_ip:port'})

2. Build a customized opener that replaces the default one
opener = urllib.request.build_opener(proxy_support)

3a. Install the opener globally
urllib.request.install_opener(opener)
3b. Or call the opener directly without installing it (see the sketch below)
opener.open(url)
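
A minimal sketch of option 3b, using the opener directly instead of installing it (the proxy address is only a placeholder and will almost certainly be dead):

import urllib.request

proxy_support = urllib.request.ProxyHandler({'http': '119.6.144.73:81'})   # placeholder proxy
opener = urllib.request.build_opener(proxy_support)

# the request below goes through this opener only; the default opener is untouched
response = opener.open('http://www.whatismyip.com.tw')
print(response.read().decode('utf-8'))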

Example:

Search the web for a proxy IP. Not every one you find will work; it is a matter of luck.

For example:

http://31f.cn/

To test: http://www.whatsmyip.net/ shows the IP address you are currently browsing from, and you can use that address in place of the url in the program.

Many free proxies are no longer usable, so the program below is for reference only.

import urllib.request
import random

url = 'http://www.whatismyip.com.tw'

iplist = ['119.6.144.73:81', '183.203.208.166:8118', '111.1.32.28:81']

# create the opener with a randomly chosen proxy
proxy_support = urllib.request.ProxyHandler({'http':random.choice(iplist)})

opener = urllib.request.build_opener(proxy_support)
# change the headers to avoid anti-crawler checks; this can also be done by building a Request object
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.65 Safari/537.36')]
# install it, replacing the default opener
urllib.request.install_opener(opener)

# from here on, the steps are the same as before

response = urllib.request.urlopen(url)
html = response.read().decode('utf-8')

print(html)
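
Most free proxies are dead on arrival, so it can help to filter the list before using it. A minimal sketch, assuming http://httpbin.org/ip as the test URL and a short timeout:

import urllib.request

def working_proxies(iplist, test_url='http://httpbin.org/ip', timeout=5):
    # return only the proxies that manage to fetch test_url within the timeout
    good = []
    for ip in iplist:
        handler = urllib.request.ProxyHandler({'http': ip})
        opener = urllib.request.build_opener(handler)
        try:
            opener.open(test_url, timeout=timeout)
            good.append(ip)
        except OSError:   # URLError and timeouts are both OSError subclasses
            pass
    return good

print(working_proxies(['119.6.144.73:81', '183.203.208.166:8118', '111.1.32.28:81']))

The longer script below combines the two techniques from this section, a browser User-Agent plus a randomly chosen proxy, to download images from http://jandan.net/ooxx/.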


 

import urllib.request
import os
import random


def url_open(url):
    # fetch url through a randomly chosen proxy with a browser User-Agent; return the raw bytes
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.65 Safari/537.36')

    proxies = ['119.6.144.70:81', '111.1.36.9:80', '203.144.144.162:8080']
    proxy = random.choice(proxies)

    proxy_support = urllib.request.ProxyHandler({'http':proxy})
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)

    response = urllib.request.urlopen(url)
    html = response.read()

    return html


def get_page(url):
    # the page source contains 'current-comment-page">[N]'; return N (the newest page number) as a string
    html = url_open(url).decode('utf-8')

    a = html.find('current-comment-page') + 23   # +23 skips 'current-comment-page">[' to the first digit
    b = html.find(']', a)

    return html[a:b]


def find_imgs(url):
    # collect the address of every .jpg referenced by an 'img src=' attribute on the page
    html = url_open(url).decode('utf-8')
    img_addrs = []

    a = html.find('img src=')

    while a != -1:
        b = html.find('.jpg', a, a+255)
        if b != -1:
            img_addrs.append(html[a+9:b+4])   # a+9 skips 'img src="', b+4 keeps the '.jpg'
        else:
            b = a + 9

        a = html.find('img src=', b)

    return img_addrs


def save_imgs(folder, img_addrs):
    for each in img_addrs:
        if each.startswith('//'):       # jandan image links are protocol-relative, so add a scheme
            each = 'http:' + each
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)


def download_mm(folder='OOXX', pages=10):
    os.mkdir(folder)
    os.chdir(folder)

    url = "http://jandan.net/ooxx/"
    page_num = int(get_page(url))                 # newest page number

    for i in range(pages):
        # walk backwards one page at a time from the newest page
        page_url = url + 'page-' + str(page_num - i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)

if __name__ == '__main__':
    download_mm()
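
The string slicing in find_imgs works but is fragile. A regular-expression version is one possible alternative; a minimal sketch, assuming the page still embeds images as img src="....jpg" (pass it the decoded page text returned by url_open):

import re

def find_imgs_re(html):
    # return the src of every <img> whose address ends in .jpg
    return re.findall(r'img src="([^"]+\.jpg)"', html)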

 

Exception handling:

The first way to handle the exceptions (HTTPError is a subclass of URLError, so it has to be caught first):

from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

someurl = 'http://www.example.com/'      # placeholder URL to fetch

req = Request(someurl)
try:
    response = urlopen(req)
except HTTPError as e:
    print('The server couldn\'t fulfill the request.')
    print('Error code: ', e.code)
except URLError as e:
    print('We failed to reach a server.')
    print('Reason: ', e.reason)
else:
    # everything is fine
    print(response.read().decode('utf-8'))

The second way to handle the exceptions:

from urllib.request import Request, urlopen
from urllib.error import URLError

someurl = 'http://www.example.com/'      # placeholder URL to fetch

req = Request(someurl)
try:
    response = urlopen(req)
except URLError as e:
    if hasattr(e, 'reason'):
        print('We failed to reach a server.')
        print('Reason: ', e.reason)
    elif hasattr(e, 'code'):
        print('The server couldn\'t fulfill the request.')
        print('Error code: ', e.code)
else:
    # everything is fine
    print(response.read().decode('utf-8'))
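
Either pattern can be wrapped into a small helper that the earlier scripts could reuse. A minimal sketch (the function name fetch_html is just an illustration):

from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError


def fetch_html(url, headers=None):
    # return the decoded page text, or None if the request failed
    req = Request(url, headers=headers or {})
    try:
        response = urlopen(req)
    except HTTPError as e:
        print('The server couldn\'t fulfill the request. Error code:', e.code)
        return None
    except URLError as e:
        print('We failed to reach a server. Reason:', e.reason)
        return None
    return response.read().decode('utf-8')


html = fetch_html('http://www.whatismyip.com.tw',
                  {'User-Agent': 'Mozilla/5.0'})
if html:
    print(html)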
