Python Crawler Basics

1. Fetch a page with urllib

1. Import the urllib library.

2. Send the request.

3. Read the returned content.

4. Handle the encoding (the b'' prefix marks a bytes object, which needs to be decoded as UTF-8).

5. Print the result.

import urllib.request

response = urllib.request.urlopen("http://www.baidu.com")
html = response.read()
html = html.decode("utf-8")
print(html)
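
One caveat: decode("utf-8") assumes the page really is UTF-8. As a small variation (a sketch, not part of the original), the charset declared in the response headers can be used, falling back to utf-8:

import urllib.request

# A minimal sketch: read the charset from the response headers instead
# of hard-coding utf-8, and fall back to utf-8 when none is declared.
response = urllib.request.urlopen("http://www.baidu.com")
charset = response.headers.get_content_charset() or "utf-8"
html = response.read().decode(charset)
print(html)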

2. Download an image and save it locally

import urllib.request

# Method 1: open the URL string directly
#response = urllib.request.urlopen("https://img6.bdstatic.com/img/image/smallpic/weiju112.jpg")

# Method 2: build a Request object first, then open it
req = urllib.request.Request("https://img6.bdstatic.com/img/image/smallpic/weiju112.jpg")
response = urllib.request.urlopen(req)

cat_img = response.read()

with open('aaaabbbbcccc.jpg','wb') as f:
    f.write(cat_img)
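
As an aside, urllib.request also provides urlretrieve(), which saves a URL straight to a local file in one call; a minimal sketch with the same image URL and filename:

import urllib.request

# urlretrieve() downloads straight to a file, skipping the explicit
# read()/write() pair (it is documented as a legacy interface).
urllib.request.urlretrieve(
    "https://img6.bdstatic.com/img/image/smallpic/weiju112.jpg",
    "aaaabbbbcccc.jpg")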

3. Youdao Translate

import urllib.request
import urllib.parse
import json

content = input("Please enter the text to translate: ")

url='http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=https://www.baidu.com/link'

data = {
    'action': 'FY_BY_CLICKBUTTON',
    'doctype': 'json',
    'i': content,
    'keyfrom': 'fanyi.web',
    'type': 'auto',
    'typoResult': 'true',
    'ue': 'UTF-8',
    'xmlVersion': '1.8',
}

data = urllib.parse.urlencode(data).encode("utf-8")
response = urllib.request.urlopen(url, data)
html = response.read().decode('utf-8')

res = json.loads(html)  # res is a dict
print("The result: %s" % res['translateResult'][0][0]['tgt'])

4. Youdao Translate with request headers (1): build a header dict and pass it to Request.

import urllib.request
import urllib.parse
import json

content = input("Please enter the text to translate: ")

url='http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=https://www.baidu.com/link'

head = {}  # request headers that make the request look like a normal browser visit
head['User-Agent'] = "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"

data = {
    'action': 'FY_BY_CLICKBUTTON',
    'doctype': 'json',
    'i': content,
    'keyfrom': 'fanyi.web',
    'type': 'auto',
    'typoResult': 'true',
    'ue': 'UTF-8',
    'xmlVersion': '1.8',
}

data = urllib.parse.urlencode(data).encode("utf-8")

#response = urllib.request.urlopen(url, data)
req = urllib.request.Request(url, data, head)
response = urllib.request.urlopen(req)

html = response.read().decode('utf-8')

res = json.loads(html)  # res is a dict
print("The result: %s" % res['translateResult'][0][0]['tgt'])

5. Youdao Translate with request headers (2): use Request.add_header().

import urllib.request
import urllib.parse
import json

content = input("Please enter the text to translate: ")

url='http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=https://www.baidu.com/link'

'''
head = {}  # request headers that make the request look like a normal browser visit
head['User-Agent'] = "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
'''

data = {
    'action': 'FY_BY_CLICKBUTTON',
    'doctype': 'json',
    'i': content,
    'keyfrom': 'fanyi.web',
    'type': 'auto',
    'typoResult': 'true',
    'ue': 'UTF-8',
    'xmlVersion': '1.8',
}

data = urllib.parse.urlencode(data).encode("utf-8")

#response = urllib.request.urlopen(url, data)
req = urllib.request.Request(url, data)
req.add_header('User-Agent', "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0")
response = urllib.request.urlopen(req)

html = response.read().decode('utf-8')

res = json.loads(html)  # res is a dict
print("The result: %s" % res['translateResult'][0][0]['tgt'])

6. Using a proxy

1. Create a parameter dict of the form {'type': 'proxy_ip:port'}, e.g. {'http': '127.0.0.1:8080'}.

proxy_support = urllib.request.ProxyHandler({})

2. Build a customized opener.

opener = urllib.request.build_opener(proxy_support)

3. Install the opener.

urllib.request.install_opener(opener)

4. Call the opener (after install_opener(), a plain urllib.request.urlopen() call also goes through the proxy), as shown in the sketch after the full code below.

opener.open(url)

The full code:

import urllib.request
import random
import time

while True:
    url = 'http://www.whatismyip.com.tw'  # a site that reports the requesting IP address
    iplist = ['171.39.32.171:9999', '112.245.170.47:9999', '111.76.129.119:808', '27.206.143.225:9999', '114.138.196.144:9999']  # proxies in ip:port form

    # 1. Create the parameter dict {'type': 'proxy_ip:port'} with a random proxy
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
    #proxy_support = urllib.request.ProxyHandler({'http': '123.163.219.132:81'})

    # 2. Build a customized opener
    opener = urllib.request.build_opener(proxy_support)
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0')]

    # 3. Install the opener globally
    urllib.request.install_opener(opener)

    # 4. After install_opener(), urlopen() goes through the proxy
    res = urllib.request.urlopen(url)
    html = res.read().decode('utf-8')
    print(html)
    time.sleep(5)
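
Step 4 above (opener.open(url)) also works without installing the opener globally, which scopes the proxy to a single request. A minimal sketch with a placeholder proxy address:

import urllib.request

# A minimal sketch of step 4 without install_opener(): calling
# opener.open() directly applies the proxy to this one request only.
# The proxy address below is a placeholder, not a working proxy.
proxy_support = urllib.request.ProxyHandler({'http': '127.0.0.1:8080'})
opener = urllib.request.build_opener(proxy_support)
res = opener.open('http://www.whatismyip.com.tw')
print(res.read().decode('utf-8'))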


