# Several ways to download a web page with urllib2 (urllib2下载网页的几种实现方式)
#!/usr/bin/env python
#coding:utf-8
import urllib2
import cookielib
# Target page shared by all three demo fetch functions below.
url = "https://www.crummy.com/software/BeautifulSoup/bs4/doc.zh/#beautiful-soup-4-2-0"
def first():
#直接调用urllib2的urlopen方法打开
print "======first====="
response1 = urllib2.urlopen(url)
print response1.getcode()
print len(response1.read())
def second():
print "======second====="
#添加请求头,可以将爬虫伪装成浏览器
request = urllib2.Request(url)
request.add_header('user-agent', 'Mozilla/5.0')
response2 = urllib2.urlopen(url)
print response2.getcode()
print len(response2.read())
def third():
#添加特殊情景的处理器,大概4种情况,有些网页需要cookie,需要代理,需要https支持,需要重定向。这里是引入cookie的情况。
print "======third====="
#创建cookie容器
cj = cookielib.CookieJar()
#创建opener
opener =urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
#给urllib2安装opener
urllib2.install_opener(opener)
response3 = urllib2.urlopen(url)
print response3.getcode()
print cj
print response3.read()
# Run the three demo fetches only when executed as a script, so that
# importing this module does not trigger three network requests.
if __name__ == "__main__":
    first()
    second()
    third()